Dataset schema: query (string, 9 to 9.05k chars), document (string, 10 to 222k chars), negatives (list, 19 to 20 items), metadata (dict).
Creates a bottleneck tensor for a given style image.
def run_style_predict(self, style_image): # The style image has to be preprocessed to (1, 256, 256, 3) preprocessed_style_image = cv_utils.preprocess(style_image, self.style_predict_executor.get_data_type(), self.style_predict_executor.get_shape(), ...
[ "def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor):\n print('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append a string to the self.message string.
def appendMsg(self, msg): # self.message += msg theTime = self.logger.mytime() # self.message += theTime + " " + str( msg ) self.message = str(self.message) + str(theTime) + " " + str(msg)
[ "def rawAppend(self, data):\n self.message = self.message + data", "def append_message(self, a_sender, a_message):\n if a_sender is None:\n message = \"Unknown object >>> \" + str(datetime.datetime.now()) + \" >>> \"\n else:\n message = a_sender.__class__.__name__ + \" >...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the NNNN number from the recvData.
def getMsgNumber(self): wData = self.recvData self.logIt(__name__ + ".getMsgNumber(): wData=" + str(wData) + "\n") msgNum = "" msgNum2 = None for i in range(0, len(str(wData))): if wData is None: break if str(wData[i]).isdigit(): ...
[ "def recvn(self, n):\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n\n return b''.join(data)", "def nfmc_no(self):\n return self._nfmc_no", "def ReadChunkNum(self):\n numbin = self.Read(4)\n num = struct.unpack('>L', numbin)[0]\n return num", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the file data to be returned to the client. The file is assumed to be in the current working directory. The file name will be NNNN.txt, where NNNN is the first 4 bytes of the received data. The rest of the received data is ignored.
def getFileData(self): # fileName = "./0000.txt" self.logIt(__name__ + ".getFileData(): data=" + str(self.recvData) + "\n") msgNum = self.getMsgNumber() if msgNum is None: return "" # fileName = "%-04.4d" % (msgNum ) + ".txt" fileName = "./files/" + str(m...
[ "def get_file_data(filename):", "def get_file(self, path):\n file = self.get('data_request?id=file&parameters=%s' % path)\n return file", "def get_file():\n fname = get_var(request, \"fname\")\n return open(fname).read()", "def get(file: str, addr: tuple) -> bytes:\n assert type(file) =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the counts file.
def updateCounts(self): found = False fileName = "counts" if not os.access(fileName, os.F_OK): try: TFH = open(fileName, "w") TFH.close() except IOError as inst: # @UnusedVariable self.logIt(__name__ + ".updateCounts(): Una...
[ "def update_counts(self) -> None:\n ...", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def update_usage_count(file_path):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cut the input spectrum to the desired frequency range. Outside the desired frequency range, the spectrum is filled with zeros.
def cut_spectrum(input_spectrum, desired_frequency_range): channels_ip = [] for ip in input_spectrum.GetChannels(): channel_ip = [] channel_op = [] for n, i in enumerate(ip): if n > desired_frequency_range[0] / input_spectrum.GetResolution() and n < desired_frequency_range[1]...
[ "def spectrum_cut(spectrum, eVrange=(0.0, 0.0)):\n if eVrange[1] == 0.0:\n return spectrum\n else:\n if spectrum[-1,0] <= eVrange[0] or spectrum[0,0] >= eVrange[1]:\n return np.array([[0, 0]], dtype=np.float)\n else:\n idx1 = np.argmax(spectrum[:,0] >= eVrange[0])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Appends zeros until the signal has the given length. If no length is given, zeros will be appended until the length is a power of 2.
def append_zeros(input_signal, length=None): if length is None: length = 2 ** int(math.ceil(math.log(len(input_signal), 2))) zeros = length - len(input_signal) result = sumpf.Signal(channels=tuple([c + (0.0,) * zeros for c in input_signal.GetChannels()]), samplingrate=input...
[ "def pad(signal, new_length, end):\n assert len(signal) > 1 and len(signal[0]) > 1\n signal = np.array(signal)\n if len(signal) < new_length:\n zero_row = np.zeros(len(signal[0]))\n zero_row = np.array([zero_row])\n count = 0\n while len(signal) < new_length:\n if end...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first output signal.
def GetFirstOutput(self): return self.__output_signal1
[ "def GetSecondOutput(self):\n return self.__output_signal2", "def take_first_non_signal(self):\n tmp = self.input_queue.get()\n if not isinstance(tmp,ComputerSignals):\n return int(tmp)\n else:\n return self.take_first_non_signal()", "def current_signal():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the second output signal.
def GetSecondOutput(self): return self.__output_signal2
[ "def GetFirstOutput(self):\n return self.__output_signal1", "def SetSecondInput(self, input_signal2):\n self.__input_signal2 = input_signal2\n self._changelength()", "def get_signal(self):\n return self._get_signal(self.y_pred)", "def enable_signal_output(self):\n self.sigge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the first input signal.
def SetFirstInput(self, input_signal1): self.__input_signal1 = input_signal1 self._changelength()
[ "def _set_signal(self, signal):\n self[signal.nxname].signal = NXattr(1)", "def SetSecondInput(self, input_signal2):\n self.__input_signal2 = input_signal2\n self._changelength()", "def setInitialSig(self, sig):\n self.initialSig = [int(a) for a in sig]", "def set_input(self, new_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the second input signal.
def SetSecondInput(self, input_signal2): self.__input_signal2 = input_signal2 self._changelength()
[ "def SetFirstInput(self, input_signal1):\n self.__input_signal1 = input_signal1\n self._changelength()", "def GetSecondOutput(self):\n return self.__output_signal2", "def enable_signal_output(self):\n self.siggen1.set_output_state(True)\n self.siggen2.set_output_state(True)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function to change the length of a signal. If the signal is longer than the given length, it is truncated; otherwise, zeros are appended to the signal.
def change_length_signal(signal, length=None): if length is None: length = len(signal) if len(signal) >= length: signal = sumpf.modules.CutSignal(signal=signal, start=0, stop=length).GetOutput() else: signal = append_zeros(signal, length) return signal
[ "def truncate(self, length):\n \n self.sequence = self.sequence[:length]", "def pad(signal, new_length, end):\n assert len(signal) > 1 and len(signal[0]) > 1\n signal = np.array(signal)\n if len(signal) < new_length:\n zero_row = np.zeros(len(signal[0]))\n zero_row = np.array([zero_ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Smooth the spectra of the filter kernels to make them suitable for a curve-fitting algorithm.
def smooth_filter_kernels(kernels=None, window_size=53, polynomial_order=3): kernels_smooth = [] for kernel in kernels: kernel_spec = sumpf.modules.FourierTransform(kernel).GetSpectrum() kernel_spec_channel = kernel_spec.GetChannels()[0] kernel_spec_channel_smooth = savitzky_golay(kernel...
[ "def smooth(spectra, filter_win, window_type='flat', mode='reflect'):\n\n if window_type == 'flat':\n window = np.ones(filter_win)\n else:\n window = scipy.signal.windows.get_window(window_type, filter_win)\n window = window / np.sum(window)\n\n for column in range(spectra.shape[1]):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the linearly weighted spectrum.
def linearweighting(input): if isinstance(input, (sumpf.Signal)): ip = sumpf.modules.FourierTransform(signal=input).GetSpectrum() else: ip = input dummy = 0.0001 while True: dummy = dummy + 0.0001 low = 1 * (dummy ** 1) high = 1 * (dummy ** (len(input) - 1)) ...
[ "def linear_weighted(value):\r\n n = np.arange(600, dtype='float32')\r\n dist = np.abs(n-value)\r\n normed = dist / np.mean(dist)\r\n return normed", "def weightedpower(time, signal, weight, freq):\n\n result = np.zeros(len(freq))\n\n for i in range(len(freq)):\n if (freq[i] != 0.0):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split the corpus into k equally sized ranges.
def Split(self, k): n = len(self) start = range(0, n, ceil(n / k)) end = list(start[1:]) + [n] return [range(first, last) for first, last in zip(start, end)]
[ "def split_array_ranges(length, k):\n\tchunks = []\n\tstep = int(length/k)\n\tstart_ind = 0\n\tend_ind = step\n\twhile end_ind < length:\n\t\tchunks.append((start_ind, end_ind))\n\t\tstart_ind = end_ind\n\t\tend_ind += step\n\tchunks.append((start_ind, length))\n\treturn chunks", "def chunks(l, k):\n n = len(l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the dataframe df_airport_code and return a dataframe.
def clean_airport_code(spark, input_data): try: #read file df_airport_code = spark.read.option("header","true").option("recursiveFileLookup","true").parquet(input_data+'airport-codes_csv') # drop columns # filter closed , heliport and seaplace base airport, small_airport # k...
[ "def cleaning_stationdata(df):", "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the dataframe df_global_airports and return a dataframe.
def clean_global_airports(spark, input_data): try: #read file df_global_airports = spark.read.option("header","true").csv(input_data+'airports-extended.csv') drop_cols = ["icao","type", "latitude", "longitude", "altitude", "timezone", "dst", "tz_timezone", "data_source"] newdf = df_g...
[ "def cleaning_stationdata(df):", "def i94_airports(spark, df):\n df.createOrReplaceTempView('i94_airports')\n airports = spark.sql(\"\"\"\n SELECT\n DISTINCT\n STRING(ident) AS airport_id,\n type AS airport_type,\n name AS airpot_name,\n elevatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the dataframe df_iso_country and return a dataframe.
def clean_iso_country(spark, input_data): try: #read file df_iso_country = spark.read.option("header","true").csv(input_data+'wikipedia-iso-country-codes.csv') df = (df_iso_country.withColumnRenamed('English short name lower case','country_name') \ .withColumnRena...
[ "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_cod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the dataframe df_demograph and return a dataframe.
def clean_demograph(spark, input_data): try: #read file df_demograph = spark.read.option("header","true").option("recursiveFileLookup","true").parquet(input_data+'us-cities-demographics') drop_cols = ["Number_of_Veterans"] newdf = df_demograph.drop(*drop_cols) \ ....
[ "def cleaning_stationdata(df):", "def Clean_df(cls,dataframe: pd.DataFrame =None)-> pd.DataFrame:\n #reset index\n dataframe.index=range(dataframe.shape[0])\n #delete columns added by featuretools\n if '...' in dataframe.columns:\n dataframe.drop(columns='...',inplace=True)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes ``log(exp(x) + exp(y))`` in a numerically stable way.
def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))
[ "def logsumexp(x, y):\n max_ent = numpy.maximum(x, y)\n mask = (max_ent == LOG_ZERO)\n # If entries have -Inf values for both operands, use a bias of 0 in those\n # entries\n if mask.any():\n numpy.place(max_ent, mask, 0.0)\n return max_ent + quiet_log(numpy.exp(x - max_ent) + numpy.exp(y -...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes ``tensor.exp().sum(dim, keepdim).log()`` in a numerically stable way.
def logsumexp(tensor: torch.Tensor, dim: Optional[int] = None, keepdim: bool = False) -> torch.Tensor: if dim is None: tensor = tensor.reshape(-1) dim = -1 inputs_max = tensor.max(dim=dim, keepdim=True)[0] tensor = tensor - inputs_max if not keepdim: inputs_max = inputs_max.sque...
[ "def log_sum_exp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))\n else...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes ``torch.matmul(mat1.exp(), mat2.exp()).log()`` in a numerically stable way.
def logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor: mat1_shape = mat1.size() mat2_shape = mat2.size() mat1 = mat1.contiguous().view(-1, mat1_shape[-1]) mat2 = move_dim(mat2, 0, -1) mat2 = mat2.contiguous().view(-1, mat2_shape[0]) if use_mm: ma...
[ "def batch_logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor:\n mat1_shape = mat1.size()\n mat2_shape = mat2.size()\n mat1 = mat1.contiguous().view(mat1_shape[0], -1, mat1_shape[-1])\n mat2 = move_dim(mat2, 1, -1)\n mat2 = mat2.contiguous().view(mat2_shape[0],...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes ``torch.bmm(mat1.exp(), mat2.exp()).log()`` in a numerically stable way.
def batch_logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor: mat1_shape = mat1.size() mat2_shape = mat2.size() mat1 = mat1.contiguous().view(mat1_shape[0], -1, mat1_shape[-1]) mat2 = move_dim(mat2, 1, -1) mat2 = mat2.contiguous().view(mat2_shape[0], -1, mat2_...
[ "def logmatmulexp(mat1: torch.Tensor, mat2: torch.Tensor, use_mm: bool = False) -> torch.Tensor:\n mat1_shape = mat1.size()\n mat2_shape = mat2.size()\n mat1 = mat1.contiguous().view(-1, mat1_shape[-1])\n mat2 = move_dim(mat2, 0, -1)\n mat2 = mat2.contiguous().view(-1, mat2_shape[0])\n\n if use_mm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes ``log(1 - exp(x))`` in a numerically stable way.
def log1mexp(x: torch.Tensor) -> torch.Tensor: mask = (x < _log05).to(x.dtype) impl1 = torch.log1p(-torch.exp(x)) impl2 = torch.log(-torch.expm1(x)) return impl1 * mask + impl2 * (1 - mask)
[ "def log1p_exp(x):\n x_ = x * x.ge(0).to(torch.float32)\n res = x_ + torch.log1p(torch.exp(-torch.abs(x)))\n return res", "def log(x):\n\treturn log1p(x-1)", "def log(x):\r\n\r\n return math.log(x)", "def log_sum_exp(x):\r\n # TF ordering\r\n axis = len(x.size()) - 1\r\n m, _ = torch.max(x,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the malware id
def get_malware_id(self, date, malware_hash): malware_id = self.get_one(""" SELECT M.id FROM malwares M WHERE M.date = %s AND M.hash = %s; """, (date, malware_hash)) if malware_id: return malware_id else: self.insert(""" ...
[ "def get_verifier_id():\n cmd = (\"rally verify list-verifiers | awk '/\" +\n getattr(config.CONF, 'tempest_verifier_name') +\n \"/ {print $2}'\")\n with subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Python library based on sysconfig.
def find_python_library(): python_library = sysconfig.get_config_var('LIBRARY') if (not python_library or os.path.splitext(python_library)[1][-2:] == '.a'): candidate_lib_prefixes = ['', 'lib'] candidate_implementations = ['python'] if hasattr(sys, "pypy_version_info"): candi...
[ "def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):\n if prefix is None:\n prefix = PREFIX\n if standard_lib:\n return os.path.join(prefix, \"lib-python\", sys.version[0])\n return os.path.join(prefix, 'site-packages')", "def get_python_libs():\n v = sysconfig.get_config_v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback function for updating Peloton apps so that we can run integration tests in between updates of each app.
def update_callback(app): print("Update callback invoked for %s" % app.name) # TODO: Add integration tests here return True
[ "def test_update_web_app(self):\n pass", "def test_duo_application_update(self):\n pass", "def update_app(self):\n\n param = self.chose_param_value(\"--app\")\n self._check_path_availability([\"get_project_dir\", \"get_project_dir_to\"])\n if self._check_whether_has_params(par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the cluster config from a yaml file
def load(cfg_file): with open(cfg_file, "r") as f: try: cfg = yaml.load(f) except yaml.YAMLError as ex: print("Failed to unmarshal cluster config %s" % cfg_file) raise ex return Cluster(cfg_file, **cfg)
[ "def read_cluster_config(\n path: str = aztk.utils.constants.DEFAULT_CLUSTER_CONFIG_PATH\n) -> ClusterConfiguration:\n if not os.path.isfile(path):\n return None\n\n with open(path, 'r', encoding='UTF-8') as stream:\n try:\n config_dict = yaml.load(stream)\n except yaml....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the diff between current and desired job config
def diff_config(self, app, verbose=False): print(">>>>>>>> Job config diff for %s <<<<<<<<" % app.name) cfg_dicts = [] factory = TSimpleJSONProtocolFactory() for cfg in app.current_job_config, app.desired_job_config: if cfg: cfg_json = TSerialization.serialize...
[ "def compare_config(self):\n '''\n # Set defaults\n base_file = 'running-config'\n base_file_system = 'system:'\n if self.config_replace:\n new_file = self.candidate_cfg\n else:\n new_file = self.merge_cfg\n new_file_system = self.dest_file_syst...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform a rolling update of the Peloton apps in the cluster.
def update(self, force, verbose): # Print the job config diffs print('Update Peloton cluster "%s" to new config: ' % self.name) for app in self.apps: self.diff_config(app, verbose) if not force and not yesno("Proceed with the update ?"): return updated_...
[ "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rollback the updates to the list of apps in the cluster
def rollback(self, apps): while len(apps) > 0: app = apps.pop() print("Rolling back app %s ..." % app.name) app.rollback_job()
[ "def rollback(self):\n for db in self.values():\n db.rollback()", "def __update_application(self, apps, **extra_args):\n update_on_error = extra_args.get('update_on_error', False)\n # auto_enable_auth = extra_args.get(\n # 'auto_enable_auth', self.auto_enable_auth)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update docstring for constructed method.
def update_docstring(instance): try: docstring = instance.api_map['doc'] except (KeyError, TypeError): docstring = 'No docstring provided.' instance.__class__.__doc__ = docstring instance.__class__.__call__.__signature__ = construct_signature(instance) return docstring
[ "def _borrowdoc(method):\n if methodname is None:\n other_method = getattr(cls, method.__name__)\n else:\n other_method = getattr(cls, methodname)\n if hasattr(other_method, '__doc__'):\n if not replace:\n method.__doc__ = other_method.__doc__\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a list of valid parameters. This accumulates all known parameters from any keys embedded in _path_, _default_params_, and _valid_params_.
def get_all_valid_params(instance): params = {} path_params = instance.find_path_keys(instance.api_map.get('path', '')) for param in path_params: params[param] = '' # Always make a list of valid parameters from endpoint mapping valid_params = instance.api_map.get('valid_params', []) if...
[ "def _set_available_params(self):\n self._available_params = {\n path: data\n for path, data in list(self._all_params.items())\n if self._valid_path(path)\n }", "def _get_all_possible_params(self):\n possible_params = ('tweet_url', 'image_url', )\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exports a dictionary of inputs to a string.
def write_config_string(input_dict, entry_char='>', attribution_char='=', usekeys=None): # Selects the desired entries of the input_dict if usekeys is not None: input_dict = {key: input_dict[key] for key in usekeys} result_str = "" for key, value in input_dict.items(): ...
[ "def text_export(dict_input):\n transfer = open('artists and albums.txt', 'w+')\n\n for entry in dict_input:\n fixed = str(dict_input[entry])\n transfer.write(str(entry))\n transfer.write(fixed + '\\n')\n\n transfer.close()", "def write_input_params(file_output,input_params):\r\n\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a file until it finds a 'header finalization' line. By default, such a line is '-----\n' (five dashes followed by a newline).
def skip_file_header(file_pointer, header_end="-----\n"): # Reads file line once line = file_pointer.readline() while line: # Reads until eof # Checks if the line is a header ender if line == header_end: return line = file_pointer.readline() # If EOF is reached wit...
[ "def _read_until_end_of_comments(self, fileobj):\n offset = fileobj.tell()\n line = fileobj.readline()\n if not line:\n raise EOFError(\"Read until EOF\")\n\n line = line.strip()\n if line.startswith(\"#\"):\n return self._read_until_end_of_comments(fileobj)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to read the output file name from argv[argn]. If argv[argn] does not exist, the output file is set to std_name, whose default value is 'output.txt'.
def get_output_file_name(argn=2, std_name='output.txt'): try: name = sys.argv[argn] except IndexError: name = std_name print("Warning: no output file name received. Output will be" " written to '%s'." % name) return name
[ "def get_output_file():\n if len(sys.argv) < 4:\n return -1\n return sys.argv[3]", "def get_output_file(args):\n if args.output_file == \"-\":\n filehandle = sys.stdout\n else:\n filehandle = open(args.output_file, \"w\")\n return filehandle", "def output_file_name_maker(args...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a folder path from argv (argv[2] by default). Adds the separator character (/ or \\) if it was forgotten. Checks if the folder exists, and creates it otherwise. If the corresponding position in argv is not informed, asks the user for the path of the folder, starting from a given root folder.
def get_output_folder_name(argi=2, root_folder=""): # First tries to read the output folder name from argv[2] try: output_folder = sys.argv[argi] except IndexError: # If argv[argi] was not passed, asks the user for the output folder. output_folder = root_folder output_folder ...
[ "def get_folder():\n return input(\"Folder: \")", "def safe_folder_creation(folder_path):\r\n # Boolean initialization\r\n folder = True\r\n\r\n while folder:\r\n # Check path validity\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n folder =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads multiple strings separated by commas and removes border spaces.
def read_csv_names(string): return [remove_border_spaces(name) for name in string.split(',')]
[ "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]", "def clean_strings(row):\n\n topic_names = row['Topic name']\n topic_names = topic_names.replace(', ', ',') # Remove spaces\n topic_names = topic_names.split(',')\n return topic_names", "def test_lstr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create two 1-D Gaussian kernels with the same kernel size and sigma values such that sigma_y = 3 * sigma_x, and form a 2-D kernel from them.
def MVgaussian(size,mu1=0,mu2=0, sigma1=3,sigma2 = 1): kernel = np.zeros((size, size), dtype=np.float32) size = int(size) // 2 X = np.arange(-size,size+1) Y = np.arange(-size,size+1) for x in X: for y in Y: Gx = np.exp(-((x-mu1)**2)/(2*(sigma1**2))) Gy = np....
[ "def gaussian_2dkernel(size=5, sig=1.0):\r\n gkern1d = signal.gaussian(size, std=sig).reshape(size, 1)\r\n gkern2d = np.outer(gkern1d, gkern1d)\r\n return gkern2d / gkern2d.sum()", "def gaussian_kernel(x: torch.Tensor, y: torch.Tensor, sigma2: torch.Tensor) -> LazyTensor:\n ret, s = rbf_kernel(x, y, s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A DoG (Difference of Gaussians) filter is generated by convolving a Sobel kernel with a Gaussian kernel of a given size, orientation and scale, i.e., by obtaining the first derivative of the Gaussian kernel. A DoG filter bank is a set of DoG filters generated by obtaining the first derivatives of Gaussian kernels under various orientations and ...
def OrientedDoG(size=7,scales=[1,2],n_orientations=8): filt_count = 0 # declare the sobel kernel sobel_kernel = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32) # pre define a dummy filter bank n_filters = len(scales)*n_orient...
[ "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def run_dog(self):\n u.printf(\"Running Differen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a partial role from the given `role_id`. If the role already exists, returns that instead.
def create_partial_role_from_id(role_id, guild_id = 0): try: return ROLES[role_id] except KeyError: pass role = Role._create_empty(role_id, guild_id) ROLES[role_id] = role return role
[ "def get_by_id(self, role_id: int) -> Optional[Role]:\n\n with self.get_connection() as conn:\n with conn.cursor() as cursor:\n sql = (\n \"SELECT `name`, `desc`, `role_id`\"\n \"FROM `ab_role`\"\n \"WHERE `role_id`=%s\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the text is a role mention, returns the respective role if found.
def parse_role_mention(text): parsed = ROLE_MENTION_RP.fullmatch(text) if parsed is None: return role_id = int(parsed.group(1)) return ROLES.get(role_id, None)
[ "def parse_role(text, guild = None):\n parsed = ID_RP.fullmatch(text)\n if (parsed is not None):\n role_id = int(parsed.group(1))\n try:\n role = ROLES[role_id]\n except KeyError:\n pass\n else:\n return role\n \n role = parse_role_mention(tex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to parse a role out from the given text.
def parse_role(text, guild = None): parsed = ID_RP.fullmatch(text) if (parsed is not None): role_id = int(parsed.group(1)) try: role = ROLES[role_id] except KeyError: pass else: return role role = parse_role_mention(text) if (role ...
[ "def parse_role_mention(text):\n parsed = ROLE_MENTION_RP.fullmatch(text)\n if parsed is None:\n return\n \n role_id = int(parsed.group(1))\n return ROLES.get(role_id, None)", "def parse_role(content_str: str) -> int:\n return parse_discord_str(content_str, '@&')", "def read_role(role, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fixture to get minigraph facts
def minigraph_facts(duthosts, rand_one_dut_hostname, tbinfo): duthost = duthosts[rand_one_dut_hostname] return duthost.get_extended_minigraph_facts(tbinfo)
[ "def test_get_hyperflex_node_by_moid(self):\n pass", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def one_real_graph() -> programl_pb2.ProgramGraph:\n return next(random_networkx_generator.EnumerateTestSet())", "def test_from_manual(self):\n data = {0: {1: {\"weight\": 1}}}\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a blog post comment using the `commentmarkup.parse` function.
def parse_comment(value): try: return mark_safe(commentmarkup.parse(value)) except ValueError: return value
[ "def parse_comment(comment, postid):\n urls = get_links_from_body(comment.body)\n if urls:\n # Only insert comment into DB if it contains a link\n comid_db = db.insert('Comments',\n (None,\n postid,\n comment.i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the given text using the markup parser defined in `settings.MARKUP_PARSER` (or `limited.markup.pygtile`, if none was set).
def parse(*args, **kwargs): if hasattr(settings, 'MARKUP_PARSER'): parser = settings.MARKUP_PARSER if not callable(parser): parser = MARKUP_PARSERS.get(parser, 'textile') else: parser = MARKUP_PARSERS['textile'] try: return mark_safe(parser(*args, **kwargs)) e...
[ "def _parse_markup(self, markup_text):\n markdown_extensions = self._set_markdown_extensions()\n\n html_content = markdown.markdown(\n markup_text,\n extensions=markdown_extensions,\n )\n\n return html_content", "def markdown_parse(text):\r\n text = md_pars...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Class method. Instantiates several synapses with various dimensions.
def instantiate(cls, dim_list, **kwargs): instances = (cls(dim=dim, **kwargs) for dim in dim_list) syn_dict = {str(inst.dim):inst for inst in instances} print("Synapses instantiated: {}".format(syn_dict.keys())) return syn_dict
[ "def _spawn_syndics(self):\n self._syndics = OrderedDict() # mapping of opts['master'] -> syndic\n masters = self.opts[\"master\"]\n if not isinstance(masters, list):\n masters = [masters]\n for master in masters:\n s_opts = copy.copy(self.opts)\n s_opts...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Class method. Loads a particular synapse which has been previously saved.
def retrieve_synapse(cls, synindex): try : synapses_register = load_register('all_syn') # Retrieving the desired attributed attrs = synapses_register[synapses_register["Index"]==synindex].drop(['Index'], axis=1).to_dict('records')[0] # Intanciation of the synapse:...
[ "def _load_workflow( self, workflow_id ):\n id = self.app.security.decode_id( workflow_id )\n stored = self.app.model.context.query( self.app.model.StoredWorkflow ).get( id )\n return stored.latest_workflow", "def _load_station(self, station=None):\n self._total_tracks = 100\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a simulation with the current instance. If the simulation has already been run previously, the response is retrieved from the attribute "resp".
def simulate_stimulation(self, patt): # Defining the response: self.identities['resp'] = identity_summary('resp', patt) respindex = attribute_index('resp', self) # Running the simulation if no response has been computed for this pattern: if respindex == None : print('...
[ "def run(self, simulation):", "def test_run_simulation_stores_result(self):\n sim = ss.Simulation()\n assert sim.results == []\n sim.run_simulation(10)\n assert sim.results != []\n assert len(sim.results) == 10", "def run_sim(params):\n sim = Simulation(params)\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves the response of the current instance, stored in the attribute "resp". It registers the synapse in the register of all synapses, if it has never been saved before. It registers the response in the register of responses of the synapse. It creates a subfolder for the response in the directory of the synapse, to drop...
def save_response(self): self.indexes['resp'] = attribute_index('resp', self) # Checking if the attribute "resp" is not empty: if not type(self.resp['coords']) == np.ndarray: print("Response is empty. Please run a simulation.") # Checking if the target response has already be...
[ "def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()", "def savetrial(self, resp, resptime):\n \n s1 = self.design['S1'][self.currenttrial] #Das Vergleichsbild\n s2 = self.design['S2']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays the matrices representing the synapse.
def visualize_synapse(self, print_coords=False): S = self.S.copy() I = self.I.copy() if print_coords : print_matrix(S, I, self.res, print_coords=True, coords=self.resp['coords']) else : print_matrix(S, I, self.res)
[ "def show_matrix(self):\n print str(self.matrix)", "def show(self):\n return self.matrix", "def display(self):\n distance_matrix = \"(define distance-matrix (list\"\n for source, targets in self.__distances.iteritems():\n for target, meters in targets.iteritems():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a proper FQDN from name
def _ensure_fqdn(self, name): if name[-1:] != ".": return "%s." % name else: return name
[ "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def getfqdn(name=''):\r\n name = name.strip()\r\n if not name or name == '0.0.0.0':\r\n name = gethostname()\r\n try:\r\n hostname, aliases, ipaddrs = gethostbyaddr(name)\r\n excep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the data labels that correspond to the data samples
def data_labels(data): # The data consists of an equal number of benign and deleterious samples # The first part of the data are the benign samples (label 0), and the second part the deleterious ones (label 1) n_samples = data.shape[0] n_class_samples = int(n_samples / 2) # Get a numpy array of the...
[ "def labels(data):\n\n return data[:,0]", "def get_labels(self) -> List[int]:\n return [self.dataset[i][1] for i in range(self.num_samples())]", "def get_sample_labels(self):\n labels = [i for i, j in zip(self.CLASS_LIST, self.sample_target) if j]\n if labels == []:\n labe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a heatmap for metrics that target single qubits.
def heatmap(self, key: str) -> vis.Heatmap: metrics = self[key] assert all(len(k) == 1 for k in metrics.keys()), ( 'Heatmaps are only supported if all the targets in a metric' ' are single qubits.') assert all(len(k) == 1 for k in metrics.values()), ( 'Heatmap...
[ "def get_heat_mapplot(self) -> None:", "def heatmap(self, **kwargs) -> None:\n self.initialization_figure\n self.builder.get_heat_mapplot(**kwargs)", "def plot_qmat(q_mat):\n sns.heatmap(q_mat, cmap=\"YlGnBu\")\n sns.plt.title(\"Q Matrix Heatmap\", fontweight='bold')\n sns.plt.ylabel(\"St...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the maximum flux
def max_flux(self): return np.max(self.flux)
[ "def max_flux(frame):\n return np.max(frame.fluxes[frame.radii <= max_extent_px])", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def maxfield(self):\n\t\tz=self.zValues()\n\t\twindow=max(z)-min(z)\n\t\ti_z= where(z>(0.8*window+min(z)))\n\t\tEy = self.field_on_axis_Ey()\n\t\tnew...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the time of the maximum flux
def max_time(self): return self.time[np.argmax(self.flux)]
[ "def peak_time(self):\n return np.array([self.wftime[ch][self.waveform[ch].argmax()] for ch in range(self.nchannels)])", "def max_flux(self):\n return np.max(self.flux)", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the reference time, defaults to the mid time
def reference_time(self): if hasattr(self, '_reference_time') is False: self._reference_time = self.midtime return self._reference_time
[ "def get_time(self):\n if not self.simulated:\n return datetime.now()\n else:\n return self.simulated_time", "def get_corrected_time(self):\r\n with self.lock:\r\n self.local_current_time = time.time()\r\n local_delta_from_anchor = \\\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Naive estimate of the pulse time. Uses the mean of the flux above a fraction f of the maximum flux.
def estimate_pulse_time(self, f=0.75): idxs = np.abs(self.flux) > f * self.max_flux return np.mean(self.time[idxs])
[ "def flux_test(t):\n if t % 10 == 0:\n return 50\n else:\n return 0", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSource...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in the time and flux from a csv. The filename must point to a comma-separated file with at least two columns, "time" and "flux". Optionally, an additional "pulse_number" column can exist; if the pulse_number is specified, only data matching the requested pulse number will be loaded.
def from_csv(cls, filename, pulse_number=None): df = pd.read_csv(filename) return cls._sort_and_filter_dataframe(df, pulse_number)
[ "def read_temperature_csv(path: str):\n with open(file=path, mode=\"r\", encoding=\"utf-8\") as csv_rf:\n reader = csv.DictReader(csv_rf)\n fieldnames = reader.fieldnames\n\n if \"time\" not in fieldnames and \"temperature\" not in fieldnames:\n raise ValueError(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in the time and flux from a pandas h5 file. The filename must point to an h5 dataframe file. The dataframe should have at least two columns, "time" and "flux". Optionally, an additional "pulse_number" column can exist; if the pulse_number is specified, only data matching the requested pulse number will be loaded.
def from_h5(cls, filename, pulse_number=None): df = pd.read_hdf(filename) return cls._sort_and_filter_dataframe(df, pulse_number)
[ "def read_from_h5(file_name, **kwargs):\n import re\n import h5py\n from scipy.interpolate import InterpolatedUnivariateSpline as Spline\n\n phase_re = re.compile(\"phase_l(?P<ell>.*)_m(?P<m>.*)\")\n amp_re = re.compile(\"amp_l(?P<ell>.*)_m(?P<m>.*)\")\n\n with h5py.File(file_name, \"r\") as f:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send Grafana annotations to various endpoints
def main(annotate_uri, api_key, title, tags, description, start_time, end_time, debug): log_level = logging.INFO if debug: log_level = logging.DEBUG logging.basicConfig(format=' [%(levelname)s] %(message)s', level=log_level) try: if description is None: if not sys.stdin.is...
[ "def create_annotation(grafana_url, grafana_api_key, tags, text):\n return requests.post(\n grafana_url + \"/api/annotations\",\n json={\n 'tags': tags,\n 'text': text,\n 'time': int(time.time() * 1000),\n 'isRegion': False\n },\n headers={\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a local command Test Runner object and create the log directory.
def start_local_cmd(log_path, log_name="localcmd"): log_dir = os.path.abspath(log_path) try: os.makedirs(log_dir) except OSError: pass return CmdRunner.CmdRunner(log_dir, log_name)
[ "def create(self, work_dir):\n self.process_runner = new_process.ProcessRunner(\n os.path.join(work_dir, '../emulator/run'))", "def _create_logdir(self):\n if self.identifier.name == \"RGDataset\":\n # TODO(karan): handle temporarily constructed datasets differently\n self.l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export CTSDG generator for inference
def export_ctsdg(cfg): generator = Generator( image_in_channels=config.image_in_channels, edge_in_channels=config.edge_in_channels, out_channels=config.out_channels ) generator.set_train(False) load_checkpoint(cfg.checkpoint_path, generator) ckpt_path = Path(cfg.checkpoint_p...
[ "def load_generator(\n ckpt, is_stylegan1, G_res, out_size, noconst, latent_dim, n_mlp, channel_multiplier, dataparallel, base_res_factor\n):\n if is_stylegan1:\n generator = G_style(output_size=out_size, checkpoint=ckpt).cuda()\n else:\n generator = Generator(\n G_res,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the disparity value at each pixel by taking a small patch around each pixel of the left image and searching for it in the right image.
def calculate_disparity_map( left_img: torch.Tensor, right_img: torch.Tensor, block_size: int, sim_measure_function: Callable, max_search_bound: int = 50, ) -> torch.Tensor: assert left_img.shape == right_img.shape (H,W,C) = left_img.shape H_offset = block_size//2 W_offset = block_s...
[ "def disparity_map(image_left, image_right):\n\n # create SGBM with default parameters\n sgbm = cv2.StereoSGBM()\n\n # Change to appropriate parameters\n # These parameters pass the unit test\n\n # Matched block size >= 1 in between 3,11\n sgbm.SADWindowSize = 5\n # Maximum disparity minus mini...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the cost volume. Each pixel will have D=max_disparity cost values associated with it. Basically for each pixel, we compute the cost of different disparities and put them all into a tensor.
def calculate_cost_volume( left_img: torch.Tensor, right_img: torch.Tensor, max_disparity: int, sim_measure_function: Callable, block_size: int = 9, ): # placeholders H = left_img.shape[0] W = right_img.shape[1] H_offset = block_size//2 W_offset = block_size//2 cost_volume = ...
[ "def computeNodeVolumes(self):\n for i in np.arange(0,self.ni):\n for j in np.arange(0,self.nj):\n for k in np.arange(0,self.nk):\n \n V = self.dh[0]*self.dh[1]*self.dh[2]\n if (i==0 or i==self.ni-1): V*=0.5\n if (j==0 ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the init calls the ResponseRetriever and assigns it to a variable.
def test_init_creates_retriever(self, mock_retriever): mediator = GenericMediator() with self.subTest(): mock_retriever.assert_called_once_with() self.assertIsNotNone(mediator.retriever)
[ "def __init__(self, response: Mock) -> None:\n self.response = response", "def test_init(self):\n payload = payloads.GetResponsePayload()\n\n self.assertEqual(None, payload.object_type)\n self.assertEqual(None, payload.unique_identifier)\n self.assertEqual(None, payload.secret)"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context).
def _get_lines_from_file(filename, lineno, context_lines): try: source = open(filename).readlines() lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines pre_context = \ [line.strip('\n') for line in source[lower_bound:lineno]] contex...
[ "def _get_lines_from_file(filename, lineno, context_lines):\r\n try:\r\n source = open(filename).readlines()\r\n lower_bound = max(0, lineno - context_lines)\r\n upper_bound = lineno + context_lines\r\n\r\n pre_context = \\\r\n [line.strip('\\n') for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the number of mines in the neighboring cells
def count_neighbor_mines(self, i, j): n_neighbor_mines = -1 if not self.mines[i, j]: n_neighbor_mines = np.count_nonzero( self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2]) return n_neighbor_mines
[ "def count_mines(self):\n for i in range(0, self.field_height):\n for j in range(0, self.field_width):\n if self.field[i][j] != FIELD_MINE:\n for delta_i in range(-1, 2):\n for delta_j in range(-1, 2):\n if (not (d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the number of flags in the neighboring cells
def count_neighbor_flags(self, i, j): return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
[ "def count_neighbours(self, mask):\n from scipy.ndimage.filters import convolve\n\n mask = mask.astype('uint8')\n filter_args = {'mode': self.boundary}\n if self.boundary == 'empty':\n filter_args['mode'] = 'constant'\n filter_args['cval'] = 0\n elif self.bou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates revealed cells by checking the (i, j) cell and, recursively, the contiguous cells without mines.
def update_revealed(self, i, j): if not self.revealed[i, j]: # If not revealed cell if self.mines_count[i, j] < 0: # If wrong guess, game is over self.wrong = ~self.mines & self.flags self.wrong[i, j] = True self.game_over(...
[ "def mark_all_mine_cells(self, log=False):\n\n success = False \n\n for i in range(self.dim): \n for j in range(self.dim): \n\n if self.cells[i, j].covered and (self.cells[i, j].safe == False) and (not self.cells[i, j].flag):\n self.toggle_flag(i, j, log)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
model_is_cuda = next(model.parameters()).is_cuda return model.module._labels if model_is_cuda else model._labels
def get_labels(model): return model._labels
[ "def is_cuda(model):\n\treturn next(model.parameters()).is_cuda", "def _get_labels(model, X):\n try:\n y = model.labels_\n except AttributeError:\n y = model.predict(X)\n return y", "def classify(self):\n self._model.eval()\n with torch.no_grad():\n for data, targ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just a display() helper to print HTML code.
def print_html(html): display(HTML(html))
[ "def display(): # pylint: disable=no-method-argument\n pass # pylint: disable=unnecessary-pass", "def render_html(self) -> str:", "def render(self):\n print(self.__generate_output())", "def exec_display(self, stmt: DisplayStmt):\n self.stmt_print(str(self.evaluator.eval_node(stmt.content)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the cell in choices that is nearest to the target_cell.
def nearest_hex_cell(target_cell, choices): if choices: return _first(sorted(choices, key=lambda cell: distance_between_hex_cells(target_cell, cell)))
[ "def get_closest_match(self, cells, matching_threshold, suppress_non_answer_cells=False):\n return_value = []\n distances = [Levenshtein.distance(self.start_md, u''.join(cell['source'])) for cell in cells]\n if min(distances) > matching_threshold:\n return return_value\n\n bes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the cell in choices that is furthest from the target_cell.
def furthest_hex_cell(target_cell, choices): if choices: return _first(sorted(choices, reverse=True, key=lambda cell: distance_between_hex_cells(target_cell, cell)))
[ "def nearest_hex_cell(target_cell, choices):\n if choices:\n return _first(sorted(choices,\n key=lambda cell: distance_between_hex_cells(target_cell, cell)))", "def get_best(self):\n return min(self.solutions, key=lambda s: s.f)", "def choose_best_neighbour_simple(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests to see if an object starting at start with a given heading will pass through the target cell
def is_on_course_with(start, heading, target): v = Volant(*(tuple(start) + (heading,))) if start == target: return True for _ in range(distance_between_hex_cells(start, target)): v = v.advance() if v.xyh == (tuple(target) + (heading,)): return True return False
[ "def is_header_part(cell: str) -> bool:\n pattern = '|'.join([\n rf'(?:(?:three|3|six|6|nine|9|twelve|12)\\s+months?(?:\\s+periods?)?|quarters?|year|ytd)(?!ly)',\n rf'\\b(?:{MONTH})\\b',\n rf'^(?:end(?:ed|ing))?(?:20)\\s*[0-2]\\s*[0-9]{FOOTNOTE}$',\n rf'^\\d{1, 2}/\\d{1, 2}/\\d{2, 4}{...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the numpy image saved to 'fn' into a .png The resulting image is saved with the same filename, except with the .png extension (input image must have extension .npy).
def convert(fn): assert fn[-4:] == ".npy", "%s: File extension should match '.npy'" % fn print("Converting file '%s' to .png" % fn) numpy_img = np.load(fn) # Rescale to 0-255 and convert to uint8 rescaled = (255.0 / numpy_img.max() * (numpy_img - numpy_img.min())).astype(np.uint8) im = Image.fr...
[ "def save_image_array_as_png(image: Image, output_path: Path) -> None:\n image_pil = fromarray(numpy.uint8(image)).convert(\"RGB\")\n with tensorflow.gfile.Open(output_path, \"w\") as fid:\n image_pil.save(fid, \"PNG\")", "def to_img_path(filename):\n if self.png_format_used:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the name of the Controller.
def name(self) -> str: return "Controller"
[ "def __str__(self) -> str:\n return 'Rachio controller \"{}\"'.format(self.name)", "def network_fabric_controller_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_fabric_controller_name\")", "def name(self) -> str:\n if self._open_ctrl:\n return f\"{...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the preset mode; if None, then revert to 'Auto' mode.
def set_preset_mode(self, preset_mode: str | None) -> None: self.svc_set_system_mode(PRESET_TO_TCS.get(preset_mode, SystemMode.AUTO))
[ "def set_preset_mode(self, preset_mode: Optional[str]) -> None:\n self._set_tcs_mode(HA_PRESET_TO_TCS.get(preset_mode, EVO_AUTO))", "def set_preset_mode(self, preset_mode: str) -> None:\n self._send_command([{\"code\": DPCODE_MODE, \"value\": preset_mode}])", "def set_preset_mode(self, preset_mode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the (native) operating mode of the Controller.
def svc_reset_system_mode(self) -> None: self._call_client_api(self._device.reset_mode)
[ "def reset(self, mode='soft'):\r\n return self.vmrun('reset', mode)", "def soft_reset(self):\n self._get_controller().soft_reset()", "def hard_reset(self):\n self._get_controller().hard_reset()", "def set_mode_off(self):\n self.command.ctrl_mode=mob.OMNIBASE_CTRL_OFF", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Zone's current preset mode, e.g., home, away, temp.
def preset_mode(self) -> str | None: if self._device.tcs.system_mode is None: return # unable to determine # if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in MODE_TCS_TO_HA: if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in ( SystemMode.AWAY, SystemMo...
[ "def preset_mode(self):\n if self._presets is not None:\n presets = self._presets\n preset_temperature = presets.get(\n self._preset_mode, \"none\"\n )\n if self.hvac_mode == HVAC_MODE_AUTO:\n if (\n self._thermostat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fake the measured temperature of the Zone sensor.
def svc_put_zone_temp( self, temperature: float, **kwargs ) -> None: # set_current_temp self._device.sensor._make_fake() self._device.sensor.temperature = temperature self._device._get_temp() self.update_ha_state()
[ "def testtemperature(self) -> None:\r\n assert 298.15 == self.data.temperature", "def get_temperature(self):\n pass", "def adjustTemperature( time, temp) :\n variance = random.randrange( 5)\n if isDaylight( time) :\n temp = temp + variance\n else :\n temp = temp - variance\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the configuration of the Zone.
def svc_reset_zone_config(self) -> None: self._call_client_api(self._device.reset_config)
[ "def svc_reset_zone_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def reset( self ):\n self.conf = self.defaults", "def reset_config():\n return _set_config(_gen_config())", "def reset(self):\r\n self.clear()\r\n self._initialise()\r\n # FIXME: S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the (native) operating mode of the Zone.
def svc_reset_zone_mode(self) -> None: self._call_client_api(self._device.reset_mode)
[ "def svc_reset_system_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def reset(self, mode='soft'):\r\n return self.vmrun('reset', mode)", "def svc_reset_zone_config(self) -> None:\n self._call_client_api(self._device.reset_config)", "def system_reset(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the configuration of the Zone (min/max temp, etc.).
def svc_set_zone_config(self, **kwargs) -> None: self._call_client_api(self._device.set_config, **kwargs)
[ "def zone(self, zone: str):\n\n self._zone = zone", "def zone(self, zone):\n\n self._zone = zone", "def _set_watering_time(self, zoneid, value):\n if value not in MANUAL_WATERING_ALLOWED:\n raise ValueError(\n 'Valid options are: {}'.format(\n ',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return Position of p's subtree having key k, or last node searched
def _subtree_search(self, p, k): if k == p.key(): # found match return p elif k < p.key(): # search left subtree if self.left(p) is not None: return self._subtree_search(self.left(p), k) else: ...
[ "def _subtree_search(self, p, k):\n if k == p.key():\n return p\n elif k < p.key():\n if self.left(p) is not None:\n return self._subtree_search(self.left(p), k)\n else:\n if self.right(p) is not None:\n return self._subtree_search(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return Position of first item in subtree rooted at p
def _subtree_first_position(self, p): walk = p while self.left(walk) is not None: walk = self.left(walk) # keep walking left return walk
[ "def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk", "def _retrieve_index(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return Position of last item in subtree rooted at p
def _subtree_last_position(self, p): walk = p while self.right(walk) is not None: walk = self.right(walk) # keep walking right return walk
[ "def _subtree_last_position(self, p):\n walk = p\n while self.right(walk) is not None:\n walk = self.right(walk)\n return walk", "def after(self, p):\n self._validate(p)\n if self.right(p):\n return self._subtree_first_position(self.right(p))", "def _retr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the first Position in the tree (or None if empty)
def first(self): return self._subtree_first_position(self.root()) if len(self) > 0 else None
[ "def get_first(self) -> object:\r\n if self.root is None: # If the tree is empty, the first value will be None\r\n return None\r\n else:\r\n return self.root.value", "def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the last Position in the tree (or None if empty)
def last(self): return self._subtree_last_position(self.root()) if len(self) > 0 else None
[ "def last_node(self):\n nodes = self.as_list()\n\n if nodes:\n # If there are nodes return the last one.\n return nodes[-1]\n # No nodes, return None\n return None", "def last(self) -> Optional[MappingTree]:\n if len(self._parent_keys) == 0:\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Position just after p in the natural order. Return None if p is the last position.
def after(self, p): self._validate(p) if self.right(p): return self._subtree_first_position(self.right(p)) else: # walk upward until we turn left; None at the root means p was last walk = p above = self.parent(walk) while above is not None and walk == self.right(above): walk = above above = self.parent(above) return above
[ "def after(self, p):\n node = self._validate(p)\n return self._make_position(node._next)", "def get_order(self, p, limit=None):\n order = 1\n res = p\n while not self.is_null(res):\n res = self.add(res, p)\n order += 1\n if limit is not None and ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refresh the visualization session information.
def _refresh(request): # check the session for a vtkweb instance vis = request.session.get('vtkweb') if vis is None or vtk_launcher.status(vis.get('id', '')) is None: # open a visualization instance vis = vtk_launcher.new_instance() request.session['vtkweb'] = vis return dict(v...
[ "def refresh_plot(self):\n self.view.canvas.draw()", "def update_plot():\n pass", "def refresh(self):\n pass", "def plot_refresh():\n figure.canvas.draw()", "def refresh_by_session():\n docker_interface.refresh(session['user_container_name'])\n return jsonify({'result': 'succes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the duration for the test.
def set_duration(self, duration): self.__test_result[Result.__DURATION] = round(duration * 1000)
[ "def setDuration(self, duration):\n self.duration = duration", "def set_duration(self, duration):\n \n self.duration = float(duration)", "def duration(self, duration):\n self._duration = duration", "def set_duration(self, duration):\n\n if duration > 0:\n self.dur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the status and message for the specified step.
def set_step_status(self, step_summary: str, status: str = Status.PASSED, message: str | None = None): temp = {Result.__STEP: step_summary, Result.__STATUS: status, Result.__MESSAGE: message} self.__run.append(temp)
[ "def updateStepStatus(self, status):\n self.annotate_status = BuilderStatus.combine(self.annotate_status, status)\n last = self.sections[-1]\n last['status'] = BuilderStatus.combine(last['status'], status)\n if self.halt_on_failure and last['status'] in [\n builder.FAILURE, builder.EXCEPTION]:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a step to report.
def add_step(self, step): if not step: return temp = {Result.__STEP: step.get_name(), Result.__STATUS: step.get_status(), Result.__MESSAGE: step.get_message()} self.__run.append(temp)
[ "def add_step(self, step: FirstStepBase) -> None:\n\n self.steps.append(step)", "def add_step(self, step):\n\n step.index = len(self.steps)\n self.steps[step.name] = step", "def addStep(self, step, index = -1):\n self.steps.append(step) # Add the step to the end", "def _add_step(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set status of test to FAILED.
def set_test_failed(self): self.set_result(Status.FAILED)
[ "def failed(self):\n self._state = \"FAILED\"", "def SetUnexpectedFailure(test_result):\n test_result['status'] = 'FAIL'\n test_result['expected'] = False\n logging.error('Processing failed for test %s', test_result['testPath'])", "def set_test_passed(self):\n self.set_result(Status.PASSED)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set status of test to PASSED.
def set_test_passed(self): self.set_result(Status.PASSED)
[ "def set_test_failed(self):\n self.set_result(Status.FAILED)", "def update_test_status(self, succeeded: bool, db: Session) -> None:\n self.last_test_timestamp = datetime.now()\n self.last_test_succeeded = succeeded\n self.save(db)", "def test_pass_case(self):\r\n result = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status of test.
def get_test_status(self) -> str: return self.__test_result[Result.__RESULT]
[ "def test_status(self) -> str:\n return self._test_status", "def test_get_status(self):\n pass", "def status(self):\n return self.run_data.get('status')", "def status(self):\n stat = self.run_status.get()\n return stat", "def get_status(self):\n return self._status"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The MEAM alloy parameters for a pair of element symbols.
def alloy(self, symbols): try: return self.alloys[((self.alloys.Sym1==symbols[0]) & (self.alloys.Sym2==symbols[1]))].iloc[0] except IndexError: raise ValueError(f'MEAM parameters for alloy symbols {symbols} not found')
[ "def model_parameters():\n return k, d", "def _define_combined_fit_params(self):\n\t\tself.z_bins = np.array([0.40, 0.60, 0.80, 1.00, 1.20,\n\t\t 1.40, 1.60, 1.80, 2.20, 2.40, \n\t\t 2.50, 2.60, 2.70, 2.80, 2.90,\n\t\t 3.00, 3.10, 3.20, 3.30, 3....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the chat ID and message text of the most recent message sent to the bot.
def get_last_chat_id_and_text(updates): num_updates = len(updates["result"]) last_update = (num_updates - 1) text = updates["result"][last_update]["message"]["text"] chat_id = updates["result"][last_update]["message"]["chat"]["id"] return text, chat_id
[ "def recent_message(self):\n ordered_messages = self.messages.order_by('-timestamp')\n if len(ordered_messages):\n message = ordered_messages[0]\n else:\n message = None\n return message", "def last_message(self):\n return self.messages[-1]", "def last_me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }