Dataset schema: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, 19 to 20 items), metadata (dict).
Instantiate a Function task.
def __init__(self, func, task_loader=None, **kwargs):
    self.func = func
    self.task_loader = task_loader
    super(Function, self).__init__(**kwargs)
[ "def make_task(self, function, *args):\r\n return self.make_task_with_deps(function, [], *args)", "def create_task_from_function(\n cls,\n a_function, # type: Callable\n function_kwargs=None, # type: Optional[Dict[str, Any]]\n function_input_artifacts=None, # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Summary for every series
def base_summary(series: pd.Series) -> dict: summary = { "frequencies": series.value_counts().to_dict(), "n_records": series.shape[0], "memory_size": series.memory_usage(index=True, deep=True), "dtype": series.dtype, "types": series.map(lambda x: type(x).__name__).value_count...
[ "def summary(self):\n attributes = self.__dict__\n attributes[\"type\"] = self.__class__.__name__\n return pd.Series(attributes)", "def ts_summary(self):\n summary_dict = {}\n summary_dict[\"first_period\"] = self.first_period_in_series()\n summary_dict[\"last_period\"] =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a vector out of a string. Gets a string (e.g. Book), splits it into n-grams, and returns a vector with all possible n-grams/features.
def create_vector(string):
    vec = {}
    words = string.split()
    for word in words:
        if len(word) <= NGRAM_SIZE:
            add(vec, word)
        else:
            for i in range(len(word) - NGRAM_SIZE + 1):
                add(vec, word[i : i + NGRAM_SIZE])
    return vec
[ "def make_terms_from_string(s):\n u = s\n return u.split()", "def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams", "def from_string(string):\n return Sentence(string.split(\" \"))", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the min-max similarity of two vectors, vec_x and vec_y.
def minmax(vec_x, vec_y): minsum = 0 maxsum = 0 for ngram in vec_x: if ngram in vec_y: # ngram is in both vectors minsum += min(vec_x[ngram], vec_y[ngram]) maxsum += max(vec_x[ngram], vec_y[ngram]) else: # ngram only in vec_x maxsu...
[ "def m_distance(vec1, vec2):\n return abs(vec1[0] - vec2[0]) + abs(vec1[1] - vec2[1])", "def safe_dotprod(vec1, vec2):\n return min(1.0, max(-1.0, np.dot(normalize(vec1), normalize(vec2))))", "def test_minmax(py_c_vec):\n Vec = vec_mod.Vec\n\n vec_a = Vec()\n vec_b = Vec()\n\n for a, b in MINM...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a feature list of the vector from the string. Turns a given string into an n-gram vector and returns its feature list.
def training(string):
    print("Training...")
    vec = create_vector(string)
    print("Selecting features...")
    feature_list = select_features(vec)
    print("Done!")
    return feature_list
[ "def create_vector(string):\n vec = {}\n words = string.split()\n\n for word in words:\n if len(word) <= NGRAM_SIZE:\n add(vec, word)\n else:\n for i in range(len(word) - NGRAM_SIZE + 1):\n add(vec, word[i : i + NGRAM_SIZE])\n\n return vec", "def _par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a random part of a string s that has a given length.
def get_random_string(string, length):
    words = string.split()
    random_part = random.randint(0, len(words) - length)
    return "".join(words[random_part : random_part + length])
[ "def random_substring_of_length(string, length):\n\n start_index = random.randint(0, len(string) - length)\n return string[start_index:start_index + length]", "def random_string(length=15):\n\n rand_str = \"\"\n for _ in range(length):\n rand_str += random.choice(\"abcdefABCDEF\" + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the sky dip model configuration
def set_configuration(self, configuration): if not isinstance(configuration, Configuration): raise ValueError(f"Configuration must be {Configuration} " f"instance. Received {configuration}.") self.configuration = configuration if self.configuration.is_con...
[ "def fset(self, sky_type):\r\n arg_str = p2e._util._convert_args_to_string(\"set.radiance.sky\", \r\n sky_type)\r\n p2e._app.Exec(arg_str)", "def resetSky(self):\n\n self.__init__()", "def set_model( self, model_img ):\n\n\t\ts...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit the skydip model.
def fit(self, skydip): parameter_order = ['tau', 'offset', 'kelvin', 'tsky'] self.parameters = {} self.errors = {} self.p_opt = None self.p_cov = None self.fitted_values = None self.data = None self.sigma = None self.elevation = None log.d...
[ "def skydip(scans):\n title = Path(scans[0]).name + \" \".join([Path(scan).name.split(\"_\")[4] for scan in scans[1:]])\n\n signal = []\n std = []\n elevation = []\n\n for scan in scans:\n kd = KissData(scan)\n kd.read_data(list_data=[\"A_masq\", \"I\", \"Q\", \"F_tone\", \"F_tl_Az\", \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a fit to elevation with the model.
def fit_elevation(self, elevation):
    if self.p_opt is None:
        result = elevation * np.nan
    else:
        result = self.value_at(elevation, *self.p_opt)
    if isinstance(result, units.Quantity):
        result = result.value
    return result
[ "def make_elevation_model(interp_elev, model_nodes_z, elevation_cell=30, \n pad=3, res_air=1e12, fill_res=100, res_sea=0.3):\n\n # calculate the max elevation within survey area\n elev_max = interp_elev[pad:-pad, pad:-pad].max()\n\t\n # need to set sea level to 0 elevation\n elev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a string representation of a given parameter.
def get_parameter_string(self, parameter): if not self.has_converged or self.parameters is None: return None if parameter not in self.parameters: return None fmt = self.get_parameter_format(parameter) unit = self.get_parameter_unit(parameter) value = fmt ...
[ "def __str__(self):\n\n return \"<ExoParameter>: {0}\".format(self.__dict__)", "def format_parameter(self, name):\n return self._param_dict[name].f_format(self._param_dict[name].value)", "def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"", "def get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the string format for a given parameter.
def get_parameter_format(cls, parameter_name):
    formats = {
        'tau': '%.3f',
        'tsky': '%.1f',
        'kelvin': '%.3e'
    }
    return formats.get(parameter_name, '%.3e')
[ "def format_parameter(self, name):\n return self._param_dict[name].f_format(self._param_dict[name].value)", "def get_parameter_string(self, parameter):\n if not self.has_converged or self.parameters is None:\n return None\n if parameter not in self.parameters:\n return N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the parameter unit for the given parameter.
def get_parameter_unit(self, parameter_name):
    parameter_units = {
        'tsky': units.Unit("Kelvin"),
        'kelvin': self.data_unit
    }
    return parameter_units.get(parameter_name)
[ "def determine_parameter_unit(parameter):\n if isinstance(parameter, qcodes.Parameter):\n return parameter.unit\n else:\n return None", "def parameter_units(self) -> Tuple[str]:", "def get_units(name):\r\n from ..prms import Helper\r\n\r\n help = Helper()\r\n\r\n if name in help.prm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a string representation of the sky dip fit. Returns: str.
def __str__(self): if not self.has_converged or self.parameters is None: log.warning("The fit has not converged. Try again!") return '' result = [] for parameter in self.parameters.keys(): if parameter in self.fit_for: parameter_string = self....
[ "def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def __str__(self):\n str = \"Protein dihedral angle model. {} parameters. Temperature: {}\".format(\n self.n_dihedrals,\n self.temperature\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calibrated linear classifier binary model. This model uses a piecewise linear calibration function on each of the real (as opposed to binary) inputs (parametrized) and then combines (sums up) the results. Optionally calibration can be made monotonic. It usually requires a preprocessing step on the data, to calculate the...
def calibrated_linear_classifier(feature_columns=None, model_dir=None, quantiles_dir=None, keypoints_initializers_fn=None, optimizer=None, config=None, ...
[ "def calibrated_class(x_train,x_test,y_train,base_model):\n\n model = base_model\n\n calibrator = CalibratedClassifierCV(model, method='sigmoid',\n cv='prefit')\n calibrated_model = calibrator.fit(x_train, y_train.values.ravel())\n y_prob_cal = calibrated_model.predict_proba(x_test)[:, 1]\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calibrated linear estimator (model) for regression. This model uses a piecewise linear calibration function on each of the inputs (parametrized) and then combines (sums up) the results. Optionally calibration can be made monotonic. It usually requires a preprocessing step on the data, to calculate the quantiles of each u...
def calibrated_linear_regressor(feature_columns=None, model_dir=None, quantiles_dir=None, keypoints_initializers_fn=None, optimizer=None, config=None, ...
[ "def calibrated_linear_classifier(feature_columns=None,\n model_dir=None,\n quantiles_dir=None,\n keypoints_initializers_fn=None,\n optimizer=None,\n config...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a given WABBIT parameter file, check for the most common stupid errors
def check_parameters_for_stupid_errors( file ): import os # print('~~~~~~~~~~~~~~~~~~~~~ini-file~~~~~~~~~~~') # # read jobfile # with open(file) as f: # # loop over all lines # for line in f: # line = line.lstrip() # line = line.rstrip() # if len(...
[ "def __check_param_files(self):\n files_to_check = [self.u12,\n self.t_2bit,\n self.q_2bit,\n self.cesar_binary,\n self.ref_bed,\n self.chain,\n self.isoforms]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a given parameter in the INI file exists or not. Can be used to detect deprecated entries that somebody removed.
def exists_ini_parameter( inifile, section, keyword ): found_section = False found_parameter = False # read jobfile with open(inifile) as f: # loop over all lines for line in f: # once found, do not run to next section if found_section and line[0] == "[": ...
[ "def check_setting_ini_exist():\n if not os.path.exists('setting.ini'):\n with open('setting.ini', 'w') as file:\n file.write('login-timer =\\n')\n file.write('public =\\n')\n file.write('debug =\\n')\n file.write('# gmail account to use for password reset\\n')\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We look for the latest .h5 files to resume the simulation and prepare the INI file accordingly. Some errors are caught.
def prepare_resuming_backup( inifile ): import numpy as np import os import glob import flusi_tools # does the ini file exist? if not os.path.isfile(inifile): raise ValueError("Inifile not found!") Tmax = get_ini_parameter(inifile, "Time", "time_max", float) dim = get_ini_para...
[ "def data_processing(camera, date, sequence, sequence_type, \n scanned_parameter='Raman_pulse_time',\n long_image=False, redo_prepare=True, filtering=True):\n \n# sys.path.append('/Users/banano/Documents/UMD/Research/Rashba/Chern/Analysis/')\n# sys.path.append('C:\\Use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a 2D/3D wabbit file and return a list of how many blocks are at the different levels
def block_level_distribution_file( file ): import h5py import numpy as np # open the h5 wabbit file fid = h5py.File(file,'r') # read treecode table b = fid['block_treecode'][:] treecode = np.array(b, dtype=float) # close file fid.close() # number of blocks Nb = treecode.s...
[ "def depth_read(filename): \n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a wabbit-type HDF5 file of block-structured data. Return time, x0, dx, box, data, treecode. Get the number of blocks and the block size as N, Bs = data.shape[0], data.shape[1].
def read_wabbit_hdf5(file, verbose=True, return_iteration=False): import h5py import numpy as np if verbose: print("~~~~~~~~~~~~~~~~~~~~~~~~~") print("Reading file %s" % (file) ) fid = h5py.File(file,'r') b = fid['coords_origin'][:] x0 = np.array(b, dtype=float) b = fid['c...
[ "def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = B...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a wabbit-type HDF5 file of block-structured data. Same as read_wabbit_hdf5, but reads ONLY the treecode array.
def read_treecode_hdf5(file):
    import h5py
    import numpy as np

    fid = h5py.File(file, 'r')
    b = fid['block_treecode'][:]
    treecode = np.array(b, dtype=float)
    return treecode
[ "def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=flo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write data from wabbit to an HDF5 file
def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ): import h5py import numpy as np Level = np.size(treecode,1) if len(data.shape)==4: # 3d data Bs = np.zeros([3,1]) N, Bs[0], Bs[1], Bs[2] = data.shape Bs = Bs[::-1] ...
[ "def save_as_hdf5(self, filename):", "def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)", "def test_write(self):\n hdf5io = HDF5IO(self.filename, manager=self.manager, mode='a')\n hdf5io.write(self.nwbfile)\n hdf5io.close()\n # TO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read all h5 files in directory dir. Return time, x0, dx, box, data, treecode. Use data["phi"][it] to reference quantity phi at iteration it
def read_wabbit_hdf5_dir(dir): import numpy as np import re import ntpath import os it=0 data={'time': [],'x0':[],'dx':[],'treecode':[]} # we loop over all files in the given directory for file in os.listdir(dir): # filter out the good ones (ending with .h5) if file.ends...
[ "def hdf5_all_data(base='/usr/lib/python2.5/site-packages/MAPdata'):\n for dirname, dirs, fnames in os.walk(base):\n\n os.chdir(dirname)\n\n for fname in fnames:\n \n if fname[-4:]=='.asc':\n if not fname[:-4] + '.hdf5' in fnames:\n print 'asc:', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This generic function adds the local convergence rate as nice labels between successive data points of a convergence plot.
def add_convergence_labels(dx, er): import numpy as np import matplotlib.pyplot as plt for i in range(len(dx)-1): x = 10**( 0.5 * ( np.log10(dx[i]) + np.log10(dx[i+1]) ) ) y = 10**( 0.5 * ( np.log10(er[i]) + np.log10(er[i+1]) ) ) order = "%2.1f" % ( convergence_order(dx[i:i+1+1],er[...
[ "def test_localgp_convergence_with_labels(\n show: bool = False, verbose: bool = False\n) -> None:\n\n random_seed = 1\n x_a, x_b, x, y_a, y_b, y, x_out, x_gt, y_gt = load_data_with_labels(\n random_seed=random_seed\n )\n\n plot_signals(\n [\n (x_a[:, 0], y_a, \"$a$\", {}),\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a small function that returns the convergence order, i.e. the least squares fit to the log of the two passed lists.
def convergence_order(N, err): import numpy as np if len(N) != len(err): raise ValueError('Convergence order args do not have same length') A = np.ones([len(err), 2]) B = np.ones([len(err), 1]) # ERR = A*N + B for i in range( len(N) ) : A[i,0] = np.log(N[i]) B[i] = np.l...
[ "def logfit(N, err):\n import numpy as np\n\n if len(N) != len(err):\n raise ValueError('Convergence order args do not have same length')\n\n A = np.ones([len(err), 2])\n B = np.ones([len(err), 1])\n # ERR = A*N + B\n for i in range( len(N) ) :\n A[i,0] = np.log10(N[i])\n B[i]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a small function that returns the logfit, i.e. the least squares fit to the log of the two passed lists.
def logfit(N, err): import numpy as np if len(N) != len(err): raise ValueError('Convergence order args do not have same length') A = np.ones([len(err), 2]) B = np.ones([len(err), 1]) # ERR = A*N + B for i in range( len(N) ) : A[i,0] = np.log10(N[i]) B[i] = np.log10(err[...
[ "def curve_fit_log(xdata, ydata) :\n # Weights according to a log scale\n # Apply fscalex\n xdata_log = np.log10(xdata)\n # Apply fscaley\n ydata_log = np.log10(ydata)\n # Fit linear\n popt_log, pcov_log = curve_fit(linlaw, xdata_log, ydata_log)\n #print(popt_log, pcov_log)\n # Apply fsca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read and plot a 2D wabbit file. Not suitable for 3D data, use Paraview for that.
def plot_wabbit_file( file, savepng=False, savepdf=False, cmap='rainbow', caxis=None, caxis_symmetric=False, title=True, mark_blocks=True, block_linewidth=1.0, gridonly=False, contour=False, ax=None, fig=None, ticks=True, colorbar=True, dpi=300, block_edge_...
[ "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the error given two flusi fields.
def flusi_error_vs_flusi(fname_flusi1, fname_flusi2, norm=2, dim=2): import numpy as np import insect_tools # read in flusi's reference solution time_ref, box_ref, origin_ref, data_ref = insect_tools.read_flusi_HDF5( fname_flusi1 ) time, box, origin, data_dense = insect_tools.read_flusi_HDF5( fnam...
[ "def calc_ratio_err(spec1,spec2):\n \n result = spec1/spec2\n \n err = result*np.sqrt((1.0/spec1) + (1.0/spec2))\n \n return result,err", "def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error", "def calculate_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a WABBIT grid to a full dense grid in a single matrix. We assume here that interpolation has already been performed, i.e. all blocks are on the same (finest) level.
def to_dense_grid( fname_in, fname_out = None, dim=2 ): import numpy as np import insect_tools import matplotlib.pyplot as plt # read data time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_in ) # convert blocks to complete matrix field, box = dense_matrix( x0, dx, data, treecode...
[ "def to_basisgrid(self):\n \n bg = basisgrid.BasisGrid()\n \n for sensor in self.leaves:\n if not isinstance(sensor, sensors.PixelArraySensor):\n raise TypeError('basisgrid representation is only compatible '\n 'with detectors that are ent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare two grids. The number returned is the % of blocks from treecode1 which have also been found in treecode2
def compare_two_grids( treecode1, treecode2 ): import numpy as np common_blocks = 0 for i in range(treecode1.shape[0]): # we look for this tree code in the second array code1 = treecode1[i,:] for j in range(treecode2.shape[0]): code2 = treecode2[j,:] if np....
[ "def gridratio( grid1, grid2):\n\n nx1 = grid1.img_width\n ny1 = grid1.img_height\n nx2 = grid2.img_width\n ny2 = grid2.img_height\n\n ratio = 0.\n rms = 0.\n\n if nx1 != nx2:\n print(\"GridRatio: Nx1 != Nx2 (%d, %d)\" % (nx1, nx2))\n return ratio, rms\n\n if ny1 != ny2:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
On all blocks of the data array, replace any function values by the level of the block
def overwrite_block_data_with_level(treecode, data): if len(data.shape) == 4: N = treecode.shape[0] for i in range(N): level = treecode_level(treecode[i,:]) data[i,:,:,:] = float( level ) elif len(data.shape) == 3: N = treecode.shape[0] for i in range(N...
[ "def leaf_modify(self, func):\n for key, value in self.leaf_items():\n self[key] = func(value)", "def replace_level(self, values):\n self.pyramid[self.levels - 1] = values", "def update(self):\n self.cleanup()\n for block in self.level:\n block.update()", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This routine performs a shell command on each .h5 file in a given directory!
def command_on_each_hdf5_file(directory, command):
    import re
    import os
    import glob

    if not os.path.exists(directory):
        err("The given directory does not exist!")

    files = glob.glob(directory + '/*.h5')
    files.sort()
    for file in files:
        c = command % file
        os.system(c)
[ "def list_h5(walk_dir):\n\n file_list = []\n for root, subdirs, files in os.walk(walk_dir):\n\n for filename in files:\n file_path = os.path.join(root, filename)\n if file_path[-2:] == 'h5':\n file_list.append(file_path)\n\n return file_list", "def h5(ctx, h5_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert directory with flusi h5 files to wabbit h5 files
def flusi_to_wabbit_dir(dir_flusi, dir_wabbit , *args, **kwargs ): import re import os import glob if not os.path.exists(dir_wabbit): os.makedirs(dir_wabbit) if not os.path.exists(dir_flusi): err("The given directory does not exist!") files = glob.glob(dir_flusi+'/*.h5') fi...
[ "def convert(data_dir, save_dir='new_data'):\n\n data_path = Path(data_dir)\n save_path = Path(save_dir)\n save_path.mkdir(exist_ok=True)\n save_path = save_path / data_path.stem\n save_path.mkdir(exist_ok=True)\n\n for idx, file_path in enumerate(data_path.glob('*.h5'), start=1):\n new_fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert flusi data file to wabbit data file.
def flusi_to_wabbit(fname_flusi, fname_wabbit , level, dim=2, dtype=np.float64 ): import numpy as np import insect_tools import matplotlib.pyplot as plt # read in flusi's reference solution time, box, origin, data_flusi = insect_tools.read_flusi_HDF5( fname_flusi, dtype=dtype ) box = box[1:] ...
[ "def flusi_to_wabbit_dir(dir_flusi, dir_wabbit , *args, **kwargs ):\n import re\n import os\n import glob\n\n if not os.path.exists(dir_wabbit):\n os.makedirs(dir_wabbit)\n if not os.path.exists(dir_flusi):\n err(\"The given directory does not exist!\")\n\n files = glob.glob(dir_flus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates an .h5 file with the wabbit block structure from a given dense data matrix. To this end, the dense data is divided into equal blocks, similar to the sparse_to_dense option in wabbit-post.
def dense_to_wabbit_hdf5(ddata, name , Bs, box_size = None, time = 0, iteration = 0, dtype=np.float64): # concatenate filename in the same style as wabbit does fname = name + "_%12.12d" % int(time*1e6) + ".h5" Ndim = ddata.ndim Nsize = np.asarray(ddata.shape) level = 0 Bs = np.asarray(Bs)# make ...
[ "def to_dense_grid( fname_in, fname_out = None, dim=2 ):\n import numpy as np\n import insect_tools\n import matplotlib.pyplot as plt\n\n # read data\n time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_in )\n\n # convert blocks to complete matrix\n field, box = dense_matrix( x0, dx, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert hash_str to hash_dec.
def hash2dec(hash_str: str) -> int:
    length = len(hash_str)
    bases = [32 ** i for i in range(length)][::-1]
    dec = 0
    for i, d in enumerate(hash_str):
        dec += ch2int[d] * bases[i]
    return dec
[ "def get_hash_from_str(str: str) -> str:\n hashing_func = hashlib.sha256\n\n def str2int(s):\n return int(hashing_func(s.encode()).hexdigest(), 16)\n\n return str2int(str)", "def calculate_hash_value(self, string):\n return (ord(string[0]) * 100) + ord(string[1])", "def hash_string(string...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert hash_dec to hash_str.
def dec2hash(hash_dec: int, pre: int) -> str:
    bases = [32 ** i for i in range(pre)][::-1]
    hash_str = ""
    v = hash_dec
    for b in bases:
        a = v // b
        v = v % b
        hash_str += ch32[a]
    return hash_str
[ "def hash_str(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n raise ValueError(f\"numpy.nan expected, not {c}\")\n m = hashlib.sha256()\n m.update(c.encode(\"utf-8\"))\n r = m.hexdigest()\n if len(r) >= hash_length:\n return r[:hash_length...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a (lat, lon) coordinate to its decimal geohash representation (pre=6).
def coords2geohash_dec(*, lat: float, lon: float, pre: int = 6) -> int:
    return hash2dec(encoder(lat, lon, pre))
[ "def Geohash(self, digit=10, base=\"0123456789bcdefghjkmnpqrstuvwxyz\"):\n\t\tlongitude = self.longitude*_TODEG\n\t\tlatitude = self.latitude*_TODEG\n\t\treturn geoH.to_geohash(longitude, latitude, digit, base)\n\t\t# min_lon, max_lon = -180., 180.\n\t\t# min_lat, max_lat = -90., 90.\n\t\t# mid_lon, mid_lat = 0., 0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a decimal geohash to a (lat, lon) coordinate (we require pre=6).
def geohash_dec2coords(*, geohash_dec: int, pre: int = 6) -> Tuple[float, float]:
    res = decoder(dec2hash(geohash_dec, pre=pre))
    return round(sum(res[0]) / 2, max(3, pre - 3)), round(
        sum(res[1]) / 2, max(3, pre - 3)
    )
[ "def _decode(geohash):\n lat_val, lng_val, lat_err, lng_err = _decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val", "def coords2geohash_dec(*, lat: float, lon: float, p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uploads a file to Google Cloud Storage.
def _cloud_storage_upload(local_file, bucket, filename_on_bucket):
    client = storage.Client()
    bucket = client.get_bucket(bucket)
    blob = bucket.blob(filename_on_bucket)
    blob.upload_from_filename(local_file)
    print('uploaded ', bucket, filename_on_bucket)
[ "def upload_to_gcs():\n client = storage.Client(project=\"filmreccommendations\")\n bucket = client.get_bucket(\"filmreccommendations.appspot.com\")\n blob = bucket.blob(os.path.basename(PICKLE_FILENAME))\n blob.upload_from_filename(PICKLE_FILENAME)", "def gcloud_upload_file(file):\n if not file:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all the catalystport bindings
def get_all_catalystport_bindings():
    LOG.debug("get_all_catalystport_bindings() called")
    session = db.get_session()
    try:
        bindings = session.query(catalyst_models.CatalystPortBinding).all()
        return bindings
    except exc.NoResultFound:
        return []
[ "def get_all_portbindings(context):\n session = context.session\n ports = session.query(ml2_models.PortBinding).all()\n return {port.port_id: _make_port_dict(port)\n for port in ports}", "def list_bindings(self) -> list[tuple[str, BoundCallback, str]]:\n\n return list(self.iter_bindings...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a catalystport binding
def add_catalystport_binding(port_id, vlan_id):
    LOG.debug("add_catalystport_binding() called")
    session = db.get_session()
    binding = catalyst_models.CatalystPortBinding(port_id, vlan_id)
    session.add(binding)
    session.flush()
    return binding
[ "def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n dataKey1 = 'access_list'\n dataKey2 = 'nat_pool'\n dataKey3 = 'nat_type'\n dataKey4 = 'twice_nat_id'\n\n if acl_name is None:\n acl_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a catalystport binding
def remove_catalystport_binding(vlan_id): LOG.debug("remove_catalystport_binding() called") session = db.get_session() try: binding = (session.query(catalyst_models.CatalystPortBinding). filter_by(vlan_id=vlan_id).all()) for bind in binding: session.delete...
[ "def remove_binding(ctx, binding_name):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether the numpy data type `dt` can be safely cast to an int.
def _safely_castable_to_int(dt):
    int_size = np.dtype(int).itemsize
    safe = (np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or (
        np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size
    )
    return safe
[ "def is_int(arr, *args):\n return arr.dtype is np.dtype(np.int)", "def type_is_int(value):\n # return type(value) is int or type(value) == np.int32\n return isinstance(value, int) or isinstance(value, np.int32)", "def _is_integer(x):\n return (not isinstance(x, (bool, np.bool))) and \\\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts a while loop that sends 'command' to tello every 5 seconds.
def _sendingCommand(self):
    while True:
        self.tello.send_command('command')
        time.sleep(5)
[ "async def loop(self, ctx, time: int, command: str):\n\n for x in range(time):\n x = self.bot.get_command(command)\n await ctx.invoke(x)", "def run(self):\n while self._running:\n try:\n msg = self.ser.readline()\n if msg != None and len...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the command window and initialize all the buttons and text.
def openCmdWindow(self): panel = Toplevel(self.root) panel.wm_title('Command Panel') # create text input entry text0 = tki.Label(panel, text='This Controller map keyboard inputs to Tello control commands\n' 'Adjust the tra...
[ "def openCmd(self):\n import cmdlin2\n #print \"here openCmd \",self.boardName\n #self.ButConnect.configure(state=\"disabled\")\n if self.io==None:\n cm= os.path.join(self.boardName, self.boardName)\n cm= cm+'.exe '+self.baseAddr\n #print \":\",cm,\":\"\n self.io= cmdlin2.cmdlint(cmd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the flip window and initialize all the buttons and text.
def openFlipWindow(self): panel = Toplevel(self.root) panel.wm_title('Gesture Recognition') self.btn_flipl = tki.Button( panel, text='Flip Left', relief='raised', command=self.telloFlip_l) self.btn_flipl.pack(side='bottom', fill='both', expand='ye...
[ "def flip(self):\n if self.number_showing:\n self.image = self.ltr_image\n self.number_showing = False\n else:\n self.image = self.num_image\n self.number_showing = True", "def preview(self, window, flip=True):\n self._window = window\n rect ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the quantization config for transformer-based models.
def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]: return { "algorithm": "quantization", "preset": "mixed", "initializer": { "range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE}, "batchnorm_adaptation": {"num_bn_adaptation_...
[ "def get_config(self):\n raise NotImplementedError('Quantizer should implement get_config().')", "def _create_quantizers(self, qat_config):\n\n def input_index_to_node(node, tensor_name):\n for index, tensor in enumerate(node.in_tensors):\n if tensor_name == tensor.name:\n return index\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the default quantization config
def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]: return { "algorithm": "quantization", "preset": preset.value, "initializer": { "range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE}, "batchnorm_adap...
[ "def get_config(self):\n raise NotImplementedError('Quantizer should implement get_config().')", "def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:\n return {\n \"algorithm\": \"quantization\",\n \"preset\": \"mixed\",\n \"initializer\": {\n \"rang...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the NNCFConfig for the quantization algorithm.
def _create_nncf_config( preset: QuantizationPreset, target_device: TargetDevice, subset_size: int, model_type: Optional[ModelType], ignored_scope: Optional[IgnoredScope], advanced_parameters: Optional[AdvancedQuantizationParameters], ) -> NNCFConfig: if model_type is None: compressi...
[ "def _create_quantizers(self, qat_config):\n\n def input_index_to_node(node, tensor_name):\n for index, tensor in enumerate(node.in_tensors):\n if tensor_name == tensor.name:\n return index\n return None\n\n def select_rounding_mode(node, tensor_type):\n rounding_mode = 3 if ten...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implementation of the `compress_weights()` method for the PyTorch backend.
def compress_weights(model: torch.nn.Module, use_fake_quantize: bool = False) -> torch.nn.Module:
    compressed_model, _ = replace_modules_by_nncf_modules(model)
    insert_pre_compression_operations(model, use_fake_quantize)
    return compressed_model
[ "def compress(self):\n for op in self.bound_model.get_operations():\n weight_index = _detect_weight_index(op)\n if weight_index is None:\n _logger.warning('Failed to detect weight for layer %s', op.name)\n return\n weight_op = op.inputs[weight_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an embedded document instance from MongoDB data
def build_from_mongo(cls, data, use_cls=True):
    # If a _cls is specified, we have to use this document class
    if use_cls and '_cls' in data:
        cls = cls.opts.instance.retrieve_embedded_document(data['_cls'])
    doc = cls()
    doc.from_mongo(data)
    return doc
[ "def _to_document(self, document):\n obj = self.document()\n obj._set_from_db(document)\n return obj", "def test_create_document_without_id(self):\n data = {'name': 'julia', 'age': 6}\n doc = self.db.create_document(data)\n self.assertEqual(self.db[doc['_id']], doc)\n self.ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Multidimensional Gaussian Fourier filter. The array is multiplied with the Fourier transform of a Gaussian kernel.
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None): input = numpy.asarray(input) output = _get_output_fourier(output, input) axis = normalize_axis_index(axis, input.ndim) sigmas = _ni_support._normalize_sequence(sigma, input.ndim) sigmas = numpy.asarray(sigmas, dtype=numpy.float64) i...
[ "def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Multidimensional uniform fourier filter. The array is multiplied with the Fourier transform of a box of given size.
def fourier_uniform(input, size, n=-1, axis=-1, output=None): input = numpy.asarray(input) output = _get_output_fourier(output, input) axis = normalize_axis_index(axis, input.ndim) sizes = _ni_support._normalize_sequence(size, input.ndim) sizes = numpy.asarray(sizes, dtype=numpy.float64) if not ...
[ "def fourier_uniform(image, size, n=-1, axis=-1):\n\n # Validate and normalize arguments\n image, size, n, axis = _utils._norm_args(image, size, n=n, axis=axis)\n\n # Get the grid of frequencies\n freq_grid = _utils._get_freq_grid(\n image.shape,\n chunks=image.chunks,\n n=n,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Multidimensional ellipsoid Fourier filter. The array is multiplied with the Fourier transform of an ellipsoid of given sizes.
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None): input = numpy.asarray(input) if input.ndim > 3: raise NotImplementedError("Only 1d, 2d and 3d inputs are supported") output = _get_output_fourier(output, input) axis = normalize_axis_index(axis, input.ndim) sizes = _ni_support._...
[ "def _irfft2d(f_x) :", "def process( fids, ndim=2 ):\n\timg = np.empty_like( fids )\n\tax = -1*(np.array( range(ndim) )+1)\n\t\n\timg = np.fft.fftshift( np.fft.fftn( fids, axes=ax, ).astype( np.complex64), axes=ax )\n\t\n\treturn np.squeeze(img)", "def inverse_fourier2d(x):\n return fft.ifft2(x.reshape((28...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Safe conversion of a page to UTF-8.
def __init__(self, page):
    try:
        self.page = page.encode("utf8")
    except UnicodeDecodeError:
        self.page = page.decode('iso-8859-1').encode('utf8')
[ "def _decode_page(cls, page):\r\n contents = page.read()\r\n encoding = cls._get_encoding(page, contents) or sys.getdefaultencoding()\r\n return unicode(contents, encoding=encoding).encode('utf-8')", "def force_unicode (text) :\n exp = re.compile(\"([0-9]+):\")\n turn = 0\n while Tru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert page to str
def __str__(self): return str(self.page)
[ "def get_page_text(self):\n pass", "def process_page(page):\n content = utils.any2unicode(page, 'utf8').strip()\n content = re.sub(r\"[^a-zA-Z]\", \" \", content)\n \n return content", "def __str__(self):\n res = \"<Page(%s of %s)>\"\n return res % (self._title, str(self.site),)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the type of applying the binary operator with the current type and the type of the right operand, or returns None if the operation is not valid
def binop_type(cls, op, right_type): return None
[ "def get_op_type(self):\n return self.op_type", "def unaryop_type(cls, op):\n return None", "def binary_op(type_spec):\n type_spec = computation_types.to_type(type_spec)\n return reduction_op(type_spec, type_spec)", "def get_operator_type(self):\n return self._operator_type", "def Ope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the type of applying the unary operator to the current type
def unaryop_type(cls, op): return None
[ "def unary_op(type_spec):\n type_spec = computation_types.to_type(type_spec)\n return computation_types.FunctionType(type_spec, type_spec)", "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise Val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that settings are restored after test_settings_before.
def test_settings_restored(self) -> None: from django.conf import settings assert TestLiveServer._test_settings_before_run is True # type: ignore[attr-defined] assert ( f"{settings.__class__.__module__}.{settings.__class__.__name__}" == "django.conf.Settings" ) ...
[ "def _restore_user_settings(self):\n self._user_settings = self._original_user_settings", "def teardown(self):\n # dump persistent storage to file\n dump_persistent_settings(self.settings_path, self.id_dict)", "def tearDown(self):\r\n settings.REGISTRATION_BACKEND = self.old_backend\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LiveServer always serves statics with ``django.contrib.staticfiles`` handler.
def test_serve_static_with_staticfiles_app(self, django_testdir, settings) -> None: django_testdir.create_test_module( """ from urllib.request import urlopen from django.utils.encoding import force_str class TestLiveServer: def test_a(self, live_...
[ "def serve_static_files(request, path, insecure=False, **kwargs):\n\n if not settings.DEBUG and not insecure:\n raise Http404\n normalized_path = posixpath.normpath(unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path:\n if path.endswith('/') or...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Because ``django.contrib.staticfiles`` is not installed, LiveServer cannot serve statics with Django >= 1.7.
def test_serve_static_dj17_without_staticfiles_app(self, live_server, settings) -> None:
    with pytest.raises(HTTPError):
        urlopen(live_server + "/static/a_file.txt").read()
[ "def test_serve_static_with_staticfiles_app(self, django_testdir, settings) -> None:\n django_testdir.create_test_module(\n \"\"\"\n from urllib.request import urlopen\n\n from django.utils.encoding import force_str\n\n class TestLiveServer:\n def te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TextResponse will not be processed by RuleExtractor; it needs to be converted to HtmlResponse.
def process_response(request, response, spider): headers = ['text/html; charset=UTF-8', 'text/html; charset=utf-8', 'text/html;charset=UTF-8', 'text/html;charset=utf-8', 'text/html;charset=ISO-8859-1', 'application/xhtml+xml; charset=utf-8'] # log.msg("In Midd...
[ "def _format_response(self, response):\n texts = []\n for result in response.results: \n texts.append(result.alternatives[0].transcript)\n return texts", "def _process_html_response(self, response, action_result):\n\n # An html response, treat it like an error\n statu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests a given component dataframe for convergence, returning True for converged components
def test_component(self, component_dataframe, ignore_weight=False): # define our acceptable bounds skew_range = [-0.6, 0.6] kurt_range = [-1.5, 0.75] # accept shorter tails for bang-on data weight_low = 0.008 # perform weight test first if not ignored if not ignore_wei...
[ "def converged(self) -> bool:", "def checkConvergence(self):\n convergence = True\n for traj in self.optTraj:\n if self.convergeTraj[traj] == False:\n convergence = False\n break\n return convergence", "def test_convergence(self):\n try:\n return self._test_converge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take a location (code2, code3, or country name) and return the country name and coordinates.
def locate(location): coord = None country_name = None if location: location = location.lower() for ind, row in country_map.iterrows(): if ( (re.match(r'(.*\W|\W*){}\b'.format(row['code2']), location)) or(re.match(r'(.*\W|\W*){}\b'.format(row['code3']), location...
[ "def loc_to_coord(codes):\n def adfilter(codes):\n return re.findall(\"\"\"[a-zA-Z]+, [A-Z]{2}\"\"\", \";\".join(codes))\n\n api_key = \"AIzaSyCxQCjOrHFAf7T-W3vtUYqWkgSFkvMjxN4\"\n\n g = geocoders.GoogleV3(api_key = api_key)\n coords = {\"lat\":[], \"long\":[]}\n for code in adfilter(codes):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the steering file to gather user inputs from the GUI of pyRiverBed. Parameters are declared as global variables.
def read_steering(): print('+> Trying to read steering file...', end='') try: d = np.loadtxt('steering.txt', delimiter=',', skiprows=1) print(' [done]') except IOError: print('\nNo steering file found') print('Please provide steering file first\n') job_done() ...
[ "def gui_reader():\n SMW_IP = entryCol.entry0.get()\n FSW_IP = entryCol.entry1.get()\n\n ### Set 5GNR Parameters\n NR5G = VST().jav_Open(SMW_IP,FSW_IP)\n NR5G.Freq = float(entryCol.entry2.get())\n NR5G.SWM_Out = float(entryCol.entry3.get())\n NR5G.NR_Dir ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a table displaying parameters read from the steering file. Requires the 'tabulate' library.
def print_para_table(s): if MODE == 1: t = [['Parameter', 'Value', 'Unit'], ['Number of bends', NBENDS, '/'], ['Width', WIDTH, 'm'], ['Depth', DEPTH, 'm'], ['Length', LAMBDA*(NBENDS+1), 'm'], ['Arc wavelength', LAMBDA, 'm'], ['Sl...
[ "def textDefinitionTable(outfile=sys.stdout ,delim=' '):\n o = outfile\n o.write('Parameter'+delim+'Number of bits'+delim+\"\"\"Description\nMessageID\"\"\"+delim+'6'+delim+\"\"\"AIS message number. Must be 8\nRepeatIndicator\"\"\"+delim+'2'+delim+\"\"\"Indicated how many times a message has been repeated...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a table displaying the mean, median and mode of the centerline grid size before and after resampling. Requires the 'tabulate' library.
def print_resamp_table(mean1, median1, mode1, mean2, median2, mode2): t = [['Streamwise\nresolution', 'Before ' +'After\nresampling --> resampling', '\nUnit'], ['Mean', str(mean1) + ' --> ' + str(mean2), 'm'], ['Median', str(median1) + ' --> ' + str(median2), 'm'], ['Mode', ...
[ "def summarize(self, suffix=''):\n if self.train_results:\n all_metrics = sorted(list(set(\n self.train_results.keys()).union(set(self.test_results.keys()))))\n train_all_results = \\\n ['Train'] + \\\n [self._summarize_metric(self.train_resu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the Kinoshita Curve equation. Only works in Mode 1.
def print_eqn(): if sys.stdout.encoding.lower().startswith('utf'): if JS != 0 and JF != 0: print('Eqn: \u03B8=' + str(np.around(THETA0, decimals=6)) + '*sin(2\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')\n +' + str(np.around(THETA0**3, decimals=6)) ...
[ "def build_kinoshita():\n if MODE != 1:\n return [], [], [], [], []\n print('MODE 1: GENERATE KINOSHITA CURVE FROM EQUATION is selected')\n print('Kinoshita Curve parameters are read from steering file:')\n print_eqn()\n s = np.linspace(0, NBENDS*LAMBDA, int(NBENDS*LAMBDA/DS) + 1)\n print_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the Kinoshita Curve (non-computational part). Only works in Mode 1.
def build_kinoshita(): if MODE != 1: return [], [], [], [], [] print('MODE 1: GENERATE KINOSHITA CURVE FROM EQUATION is selected') print('Kinoshita Curve parameters are read from steering file:') print_eqn() s = np.linspace(0, NBENDS*LAMBDA, int(NBENDS*LAMBDA/DS) + 1) print_para_table(s)...
[ "def self_defined_noisy_circuit() -> 'QEnv':\n # Create environment\n env = QEnv()\n # Choose backend Baidu local simulator\n env.backend(BackendName.LocalBaiduSim2)\n\n # Number of qubits, no larger than 20 \n num_qubit = 13\n # Number of gates in each for loop\n gate_num = 3 # Depth of c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read river centerline coordinates from a user-prepared centerline file. The centerline is then resampled to prevent non-uniform spacing. Only works in Mode 2.
def read_centerline(s, x, y, cur, theta): if MODE == 2: print('MODE 2: READ YOUR OWN RIVER CENTERLINE FROM FILE is selected') try: centerlinexy = np.loadtxt(FNAME) except IOError: print('\'' + FNAME + '\' not found') print('Please place \'' + FNAME + '\' i...
[ "def find_centre(path_file, rows, cols):\n file_ANG_txt = open(path_file)\n somme_latitude = 0\n for line in file_ANG_txt:\n list_line = line.split(\" \")\n if len(list_line) >= 7 and (list_line[4] == \"CORNER_UL_LAT_PRODUCT\" or list_line[4] == \"CORNER_UR_LAT_PRODUCT\" or list_line[4] == \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extend centerline to have straight channels at both ends.
def extend_centerline(s, x, y, cur, theta): print('+> Extending centerline to have straight channels at both ends...', end='') if MODE == 1: extlength = LAMBDA/10 d = DS elif MODE == 2: extlength = WIDTH d = INTERVAL num = int(extlength/d) coshead = (x[1...
[ "def centerline(self):\n return self.__centerline", "def centerAxis():\n dislin.center()", "def linecenter(l):\n return scale3(add(l[0],l[1]),0.5)", "def center(self) -> Line:\n return Line(self.shape.pos, self.shape.pos + self.velocity)", "def update_end_points(self):\n \"\"\" Mu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Smooth the centerline using a Savitzky–Golay filter (5-point quadratic polynomial method).
def smooth_centerline(x, y): n = SMOLEV if SMOLEV < 39 else int(np.around(1.1**SMOLEV,decimals=0)) xa, xb, ya, yb = x[0], x[-1], y[0], y[-1] for i in range(n): x = savgol_filter(x, 5, 2, mode='nearest') y = savgol_filter(y, 5, 2, mode='nearest') x[0], x[-1], y[0], y[-1] = xa, xb, ya,...
[ "def _smooth_savitzky_golay(self,y, window_size=51, order=3, deriv=0, rate=1):\n import numpy as np\n from math import factorial\n\n try:\n window_size = np.abs(np.int(window_size))\n order = np.abs(np.int(order))\n except ValueError:\n raise (ValueError,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter the curvature signal with a Savitzky–Golay filter (5-point quadratic polynomial method) and a 5-point moving average.
def filter_curvature(cur, t):
    if np.mod(t, LPRINT) == 0:
        print('+> Filtering curvature (5-pt Savitzky–Golay + 5-pt Moving Average)...', end='')
    cur = savgol_filter(savgol_filter(cur, 5, 2, mode='nearest'), 5, 1, mode='nearest')
    if np.mod(t, LPRINT) == 0:
        print(' [done]')
    return cur
[ "def savitzky_golay_filter(intensity, method, waveNumbers, smoothingPoints, orderPolynomialFit, kwargs):\n \n \n if (method is None):\n return copy.deepcopy(intensity)\n \n \n elif method == \"first_derivative\":\n result = first_derivative(waveNumbers, intensity, smoothingPoints, or...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Impose a phase lag on the curvature signal by replacing the local curvature with the upstream-wise moving-averaged curvature.
def lag(s, cur, t): if LAG == 0: return cur else: if MODE == 1: num = int(WIDTH*LAGSTR/DS) elif MODE == 2: num = int(WIDTH*LAGSTR/np.mean(np.diff(s))) if np.mod(t, LPRINT) == 0: print('+> Adding phase lag to local curvature...', end='') cur = compu...
[ "def _updateV(self, ang):\n t = now()\n if self._lastA is not None:\n da = ang - self._lastA\n dt = t - self._lastT\n vNew = da / dt * 60.0 # angle is in rotations, dt seconds\n # If no previous estimate --> use current estimate\n if isnan(self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the left and right offset polylines of the centerline with an offset distance of L. Thanks to Y. Luo for improving the offsetting method.
def offset(x, y, L): length = x.size offsetx = np.zeros((length, 2)) offsety = np.zeros((length, 2)) dx = np.zeros(length-1) dy = np.zeros(length-1) dxL = np.zeros(length-1) dyL = np.zeros(length-1) xl = np.zeros(length) # counterclockwise xr = np.zeros(length) # clockwise yl = n...
[ "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n offset_from_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build and write the finite element mesh (non-computational).
def write_mesh_file(allxyz, beck_bed): if SAVEMESH: print('+> Saving finite element mesh files...', end='') fname = FNAME.rsplit('.', 1)[0] ncol = beck_bed[0,:].size nrow = beck_bed[:,0].size nele = (nrow-1)*(ncol-1)*2 d = compute_mesh(nrow, ncol, nele) h = ':...
[ "def create_dolfin_mesh(self, *args):\n N = len(self.x)\n\n mesh = dlf.Mesh(*args)\n editor = dlf.MeshEditor()\n editor.open(mesh, 'hexahedron', 3, 3)\n mat_vert = np.full((N, N, N), -1)\n\n def get_vertex_id(mat_vert, xi, yi, zi):\n \"\"\"\n input: \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a rust-analyzer-compatible rust-project.json file.
def generate_rust_project_json(self) -> None: if not self.rust_crates: return with open(os.path.join(self.environment.get_build_dir(), 'rust-project.json'), 'w', encoding='utf-8') as f: json.dump( { "sysroot_src": os.path.join...
[ "def createproject(destinationdir):\n print(f\"Writing json data files to {destinationdir}\")\n return", "def create_project_file(config):\n core_dir = get_coregen_dir(config, absolute = True)\n cp_fn = os.path.join(core_dir, COREGEN_PROJECT_NAME)\n fp = open(cp_fn, \"w\")\n\n #Open up the templ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits the target's sources into .vala, .gs, .vapi, and other sources. Handles both preexisting and generated sources. Returns a tuple (vala, vapi, others) each of which is a dictionary with the keys being the path to the file (relative to the build directory) and the value being the object that generated or represents...
def split_vala_sources(self, t: build.BuildTarget) -> \ T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.Tuple[T.MutableMapping[str, File], T.MutableMapping]]: vala: T.MutableMapping[str, File] = OrderedDict() vapi: T.MutableMapping[str, File] = Ordered...
[ "def sources2objects(sources, target, ext, objSuffix=''):\n import reader, xmlparser\n \n # It's a bit faster (about 10% on wxWindows makefiles) to not parse XML\n # but construct the elements tree by hand. We construc the tree for this\n # code:\n #\n #code = \"\"\"\n #<makefile>\n #<%s ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Vala is compiled into C. Set up all necessary build steps here.
def generate_vala_compile(self, target: build.BuildTarget) -> \ T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]: (vala_src, vapi_src, other_src) = self.split_vala_sources(target) extra_dep_files = [] if not vala_src: raise InvalidArguments(f...
[ "def setup_glibc():\n if not os.path.exists(glibc_build_dir):\n docmd(\"mkdir %s\" % glibc_build_dir)\n glibc_subdir = \"glibc-%s\" % glibc_version\n if not os.path.exists(glibc_subdir):\n docmd(\"wget http://ftpmirror.gnu.org/glibc/\"\n \"%s.tar.bz2\" % glibc_subdir)\n docmd(\"tar jxf %s.tar.b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate rules for transpiling Cython files to C or C++
def generate_cython_transpile(self, target: build.BuildTarget) -> \ T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]: static_sources: T.MutableMapping[str, File] = OrderedDict() generated_sources: T.MutableMapping[str, File] = OrderedDict() cython_source...
[ "def _convert_pyx_sources_to_lang(self):\n if _have_cython():\n # the build has Cython, so allow it to compile the .pyx files\n return\n lang = self.language or ''\n target_ext = '.cpp' if lang.lower() == 'c++' else '.c'\n sub = functools.partial(re.sub, '.pyx$', ta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to get rsp options. rsp_file_syntax() is only guaranteed to be implemented if can_linker_accept_rsp() returns True.
def _rsp_options(self, tool: T.Union['Compiler', 'StaticLinker', 'DynamicLinker']) -> T.Dict[str, T.Union[bool, RSPFileSyntax]]: options = {'rspable': tool.can_linker_accept_rsp()} if options['rspable']: options['rspfile_quote_style'] = tool.rsp_file_syntax() return options
[ "def _get_ipopt_options(self):\n try:\n with open('ipopt.opt') as f:\n options = f.read()\n except IOError:\n options = ''\n return options", "def extract_options(self):\n self.extract_type()\n self.extract_path()\n self.extract_repo()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scan a Fortran file for dependencies. Needs to be distinct from target to allow for recursion induced by `include` statements. It makes a number of assumptions, including that the `use`, `module`, `submodule` name is not on a continuation line. Regex `incre` works for `#include "foo.f90"` and `include "foo.f90"`; `usere` works f...
def _scan_fortran_file_deps(src: Path, srcdir: Path, dirname: Path, tdeps, compiler) -> T.List[str]: incre = re.compile(FORTRAN_INCLUDE_PAT, re.IGNORECASE) usere = re.compile(FORTRAN_USE_PAT, re.IGNORECASE) submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE) mod_files = [] src = Path(src) ...
[ "def parse_deps():\n Files = []\n Dependencies = []\n TimeBins = ['recover_parameters', 'startup', 'wragh', 'paramcheck',\n 'preregridinitial', 'postregridinitial', 'basegrid', \n 'initial', 'postinitial', 'postrestrictinitial', \n 'postpostinitial', 'recover_va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a histogram over all relationships in a graph
def count_relations(graph): return Counter( data[RELATION] for _, _, data in graph.edges_iter(data=True) )
[ "def relation_distribution(graph: DGLGraph):\n relations = graph.edata['e_label']\n r_unique = relations.unique(sorted=True)\n r_unique_count = torch.stack([(relations == r_u).sum() for r_u in r_unique])\n\n r_uni_count = torch.cat([r_unique.unsqueeze(dim=-1),\n r_unique_coun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a dict that accumulates the values for each key in an iterator of (key, value) pairs
def group_dict_set(iterator): d = defaultdict(set) for key, value in iterator: d[key].add(value) return dict(d)
[ "def add_densities(densities):\n\n return {spin: sum(np.array(dens[spin]) for dens in densities) \n for spin in densities[0].keys()}", "def build_histogram(iterator, key):\n buckets = defaultdict(int)\n values = {}\n\n num_objects = 0\n for obj in iterator:\n num_objects += 1\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a histogram of the different types of relations present in a graph.
def count_unique_relations(graph): return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))
[ "def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )", "def relation_distribution(graph: DGLGraph):\n relations = graph.edata['e_label']\n r_unique = relations.unique(sorted=True)\n r_unique_count = torch.stack([(relations...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts how many times each annotation is used in the graph
def count_annotations(graph): return Counter(_annotation_iter_helper(graph))
[ "def count_annotation_values(graph, annotation):\n return Counter(iter_annotation_values(graph, annotation))", "def annotation_count(content_object):", "def get_annotation_count(graph_name):\n graph = generate_graph(CharmeMiddleware.get_store(), graph_name)\n anno = graph.triples((None, None, OA['Annot...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the set of annotations used in the graph
def get_annotations(graph): return set(_annotation_iter_helper(graph))
[ "def annotations(self):\n return self._annotations", "def annotations(self):\n\n return self._annotations", "def list_annotation_names(self):\n if \"annotations\" not in self.http_manifest:\n return []\n return self.http_manifest[\"annotations\"].keys()", "def annotation...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the set of all annotations that are defined in a graph, but are never used.
def get_unused_annotations(graph): return graph.defined_annotation_keywords - get_annotations(graph)
[ "def get_annotations(graph):\n return set(_annotation_iter_helper(graph))", "def get_annotation_values(graph, annotation):\n return set(iter_annotation_values(graph, annotation))", "def get_unused_list_annotation_values(graph):\n result = {}\n for annotation, values in graph.annotation_list.items():...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all of the unused values for list annotations
def get_unused_list_annotation_values(graph): result = {} for annotation, values in graph.annotation_list.items(): used_values = get_annotation_values(graph, annotation) if len(used_values) == len(values): # all values have been used continue result[annotation] = set(values)...
[ "def get_unused_annotations(graph):\n return graph.defined_annotation_keywords - get_annotations(graph)", "def get_annotation_values(graph, annotation):\n return set(iter_annotation_values(graph, annotation))", "def get_values(self):\n return self._blacklist.get_values()", "def get_invalid_values...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets annotation/value pairs for values for which the search string is a substring
def get_annotations_containing_keyword(graph, keyword): return [ { 'annotation': annotation, 'value': value } for annotation, value in iter_annotation_value_pairs(graph) if keyword.lower() in value.lower() ]
[ "def search_for_substrings_in_list(substring, list_of_stuff):", "def parse_search_string(string):\n criteria = {}\n for match in CRITERION_RX.finditer(string):\n # TODO what if there are several of the same match!\n # TODO the cli needs to do append too\n field = match.group('field') or...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts in how many edges each value of the given annotation appears in a graph
def count_annotation_values(graph, annotation): return Counter(iter_annotation_values(graph, annotation))
[ "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def annotation_count(content_object):", "def get_annotation_count(graph_name):\n graph = generate_graph(CharmeMiddleware.get_store(), graph_name)\n anno = graph.triples((None, None, OA['Annotation']))\n count = 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all values for the given annotation
def get_annotation_values(graph, annotation): return set(iter_annotation_values(graph, annotation))
[ "def all_annotated_formulae(self, annotation, value=None):\n res = []\n for f, amap in self._annotations.items():\n # MG: Revisit this code. Can this be simplified?\n if annotation in amap:\n if value is None:\n res.append(f)\n els...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts in how many edges each value of the given annotation appears in a graph, while filtering edges by their source and target nodes
def count_annotation_values_filtered(graph, annotation, source_filter=None, target_filter=None): source_filter = keep_node_permissive if source_filter is None else source_filter target_filter = keep_node_permissive if target_filter is None else target_filter return Counter( data[ANNOTATIONS][annota...
[ "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def count_nodes_of_type_for_nodes_that_connect_to_label(source_name, source_label, target_label, node_label_list, relationship_label_list, node_of_interest_position, debug=False, session=session):\n\ttemp_rel_list = list(reverse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over unique node-node pairs in the graph
def _iter_pairs(graph): for u, v in set(graph.edges_iter()): yield u, v
[ "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def complete_graph(players):\n for player1_index, _ in enumerate(players):\n for player2_index in range(player1_index, len(players)):\n y...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns if the set of relations contains a contradiction
def relation_set_has_contradictions(relations): has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations) has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations) has_cnc = any(relation == CAUSES_NO_CHANGE for relation in relations) return 1 < sum([...
[ "def relation_set_has_contradictions(relations: Collection[str]) -> bool:\n has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)\n has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)\n has_cnc = any(relation == CAUSES_NO_CHANGE for relation i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a pair of nodes has any contradictions in their causal relationships.
def pair_has_contradiction(graph, u, v): relations = get_all_relations(graph, u, v) return relation_set_has_contradictions(relations)
[ "def relation_set_has_contradictions(relations: Collection[str]) -> bool:\n has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)\n has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)\n has_cnc = any(relation == CAUSES_NO_CHANGE for relation i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over contradictory node pairs in the graph based on their causal relationships
def get_contradictory_pairs(graph): for u, v in _iter_pairs(graph): if pair_has_contradiction(graph, u, v): yield u, v
[ "def get_chaotic_pairs(graph):\n cg = get_causal_subgraph(graph)\n\n results = set()\n\n for u, v, d in cg.edges_iter(data=True):\n if d[RELATION] not in CAUSAL_INCREASE_RELATIONS:\n continue\n\n if cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_INCREASE_RELATIONS for dd in cg.ed...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a counter of all of the mentions of pathologies in a network
def count_pathologies(graph): return Counter(_pathology_iterator(graph))
[ "def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )", "def count_unique_relations(graph):\n return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))", "def count_paths(self, grid):\n if len(grid) == 0:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the URL to get the static map. Puts a marker on the start and end locations. Assumes start and end are in a format / have enough info to give a proper location. It does clean up white space, though.
def find_map(start, end, *otherlocs): small = "200x200" large = "512x512" start = start.replace(" ","+") end = end.replace(" ","+") small_url = g_api_base_url + static_url + small + map_type_url + small_marker_url + start + map_concat + end big_url = g_api_base_url + static_url + large + map_type_url + mark...
[ "def get_static_map(start_lng, start_lat, end_lng, end_lat):\r\n geojson_str = get_map_directions(start_lng, start_lat, end_lng, end_lat)\r\n return (\r\n f\"https://api.mapbox.com/styles/v1/mapbox/streets-v11/static/\"\r\n f\"geojson({geojson_str})/auto/640x640?access_token={MAPBOX_TOKEN}\"\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }