query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Determines the convective heat transfer coefficient, either free, or forced. In the absence of any wind, the "free" wind_heat_transfer_coefficient is returned. If there is wind present, then this parameter is known as the "forced" wind_heat_transfer_coefficient.
def wind_heat_transfer_coefficient(self) -> float:
    """Determine the convective heat-transfer coefficient, free or forced.

    With no wind the returned value reduces to the constant 3.8 (the "free"
    coefficient); any wind adds 2 * wind_speed (the "forced" contribution).
    """
    # Alternative correlation kept from the original: 4.5 + 2.9 * wind_speed
    forced_term = 2 * self.wind_speed
    return 3.8 + forced_term
[ "def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a nice representation of the weather conditions.
def __repr__(self) -> str: return ( "WeatherConditions(" f"ambient_temperature: {self.ambient_temperature:.3f}K, " f"azimuthal_angle: {self.azimuthal_angle}deg, " f"declination: {self.declination}deg, " f"density: {self.density_of_air:.3f}kg/m^3, " ...
[ "def print_conditions(self):\n _outstr = \"\"\n first = True\n for cond in self._conditions:\n if not first:\n _outstr += \", \"\n if cond in ThresholdCheck._default_min_conditions:\n _outstr += \"{:s}={:.2e}\".format(cond, self._conditions[co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Model from a formula and dataframe.
def from_formula(cls, formula, data, subset=None, drop_cols=None, *args, **kwargs): # TODO: provide a docs template for args/kwargs from child models # TODO: subset could use syntax. GH#469. if subset is not None: data = data.loc[subset] eval_env = kwargs...
[ "def from_formula(\n cls, formula, data, subset=None, drop_cols=None, *args, **kwargs\n ):\n raise NotImplementedError(\"formulas are not supported for VAR models.\")", "def convert(self, df):\n return convert_df_to_model(\n model_type=self.model_type, df=df,\n outcom...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Score vector of model. Default implementation sums score_obs. The gradient of loglike with respect to each parameter.
def score(self, params, *args, **kwargs): try: # If an analytic score_obs is available, try this first before # falling back to numerical differentiation below return self.score_obs(params, *args, **kwargs).sum(0) except NotImplementedError: # Fallback in ...
[ "def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If no start_params are given, use reasonable defaults.
def _get_start_params(self, start_params=None): if start_params is None: if hasattr(self, 'start_params'): start_params = self.start_params elif self.exog is not None: # fails for shape (K,)? start_params = [0] * self.exog.shape[1] ...
[ "def _sets_default_params(self):\n pass", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def add_default_params(self):\r\n self.params = class_from_string(\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Ftest for a joint linear hypothesis. This is a special case of `wald_test` that always uses the F distribution.
def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
    """Compute the F-test for a joint linear hypothesis.

    Thin wrapper around ``wald_test`` with ``use_f=True`` so the statistic
    is referred to the F distribution.
    """
    return self.wald_test(
        r_matrix, cov_p=cov_p, scale=scale, invcov=invcov, use_f=True)
[ "def test_variational_expectations(likelihood_setup):\n likelihood = likelihood_setup.likelihood\n F = Datum.F\n Y = likelihood_setup.Y\n r1 = likelihood.log_prob(F, Y)\n r2 = likelihood.variational_expectations(F, tf.zeros_like(F), Y)\n assert_allclose(r1, r2, atol=likelihood_setup.atol, rtol=lik...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute a sequence of Wald tests for terms over multiple columns This computes joined Wald tests for the hypothesis that all coefficients corresponding to a `term` are zero. `Terms` are defined by the underlying formula or by string matching.
def wald_test_terms(self, skip_single=False, extra_constraints=None, combine_terms=None): # noqa:E501 result = self if extra_constraints is None: extra_constraints = [] if combine_terms is None: combine_terms = [] design_info = getattr(res...
[ "def get_all(any, shard, shard_term_features, qterms):\n tmp = 1\n for t in qterms:\n if t in shard_term_features[shard]:\n cdf = shard_term_features[shard][t].df\n else:\n cdf = 0\n tmp *= cdf/any\n all = tmp * any\n return all", "def test_alchemy_extraction...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats dictated text to camel case.
def camel_case_text(text):
    """Formats dictated text to camel case."""
    formatted = format_camel_case(text)
    Text("%(text)s").execute({"text": formatted})
[ "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def camelHump( text ):\n # make sure the first letter is upper case\n output = ''.join([ capitalize(word)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats n words to the left of the cursor to camel case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def camel_case_count(n): saveText = _get_clipboard_text() cutText = _select_and_cut_text(n) if cutText: endSpace = cutText.endswith(' ') text = _cleanup_text(cutText) newText = _camelify(text.split(' ')) if endSpace: newText = newText + ' ' newText = newTe...
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of words and returns a string formatted to camel case.
def _camelify(words): newText = '' for word in words: if newText == '': newText = word[:1].lower() + word[1:] else: newText = '%s%s' % (newText, word.capitalize()) return newText
[ "def convert_to_camel_case(list_of_words):\n upper_case_word = [x.capitalize() for x in list_of_words]\n return ''.join(upper_case_word)", "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats dictated text to pascal case.
def pascal_case_text(text):
    """Formats dictated text to pascal case."""
    formatted = format_pascal_case(text)
    Text("%(text)s").execute({"text": formatted})
[ "def to_pascal(text: str) -> str:\n return text.title().replace(' ', '')", "def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(word.title() for word in parts)", "def _pascal_case(arg: str):\n # replace _x with X\n tmp = re.sub(\n r\"(?<=[a-...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats n words to the left of the cursor to pascal case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def pascal_case_count(n): saveText = _get_clipboard_text() cutText = _select_and_cut_text(n) if cutText: endSpace = cutText.endswith(' ') text = _cleanup_text(cutText) newText = text.title().replace(' ', '') if endSpace: newText = newText + ' ' newText = n...
[ "def to_pascal(text: str) -> str:\n return text.title().replace(' ', '')", "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(wo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats dictated text to snake case.
def snake_case_text(text):
    """Formats dictated text to snake case."""
    formatted = format_snake_case(text)
    Text("%(text)s").execute({"text": formatted})
[ "def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()", "def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()", "def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctua...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats n words to the left of the cursor to snake case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def snake_case_count(n): saveText = _get_clipboard_text() cutText = _select_and_cut_text(n) if cutText: endSpace = cutText.endswith(' ') text = _cleanup_text(cutText.lower()) newText = '_'.join(text.split(' ')) if endSpace: newText = newText + ' ' newText ...
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats n words to the left of the cursor to upper case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def uppercase_count(n): saveText = _get_clipboard_text() cutText = _select_and_cut_text(n) if cutText: newText = cutText.upper() newText = newText.replace("%", "%%") # Escape any format chars. Text(newText).execute() else: # Failed to get text from clipboard. Key('c-v')...
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats dictated text to lower case.
def lowercase_text(text):
    """Formats dictated text to lower case."""
    formatted = format_lower_case(text)
    Text("%(text)s").execute({"text": formatted})
[ "def lower_case(self, text):\n return text.lower()", "def LOWER(text):\n return text.lower()", "def to_lower(self):\n\n print('Converting to lowercase...')\n self.__data['text'] = self.__data['text'].str.lower()", "def lowercase(text):\n\n lowercase_text = text.lower()\n return lowercase_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats n words to the left of the cursor to lower case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def lowercase_count(n): saveText = _get_clipboard_text() cutText = _select_and_cut_text(n) if cutText: newText = cutText.lower() newText = newText.replace("%", "%%") # Escape any format chars. Text(newText).execute() else: # Failed to get text from clipboard. Key('c-v')...
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans up the text before formatting to camel, pascal or snake case. Removes dashes, underscores, single quotes (apostrophes) and replaces them with a space character. Multiple spaces, tabs or new line characters are collapsed to one space character. Returns the result as a string.
def _cleanup_text(text): prefixChars = "" suffixChars = "" if text.startswith("-"): prefixChars += "-" if text.startswith("_"): prefixChars += "_" if text.endswith("-"): suffixChars += "-" if text.endswith("_"): suffixChars += "_" text = text.strip() text ...
[ "def clean_up_text(text):\n text = text.lower() # to lower case\n text = re.sub(r'[^a-z]', ' ', text) # replace other characters than a-z with ' '\n return text", "def sanitize(text):\n text = str(text).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.\\/]', '', text)", "def clean...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the text contents of the system clip board.
def _get_clipboard_text():
    """Returns the text contents of the system clipboard."""
    return Clipboard().get_system_text()
[ "def display_text(self):\n return self.display_screen.text()", "def getTextFromClipboard(self) -> str:\n cb = self.qtApp.clipboard()\n if cb:\n QtWidgets.QApplication.processEvents()\n return cb.text()\n g.trace('no clipboard!')\n return ''", "def text(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Selects wordCount number of words to the left of the cursor and cuts them out of the text. Returns the text from the system clip board.
def _select_and_cut_text(wordCount):
    """Selects wordCount words to the left of the cursor, cuts them, and
    returns the cut text from the system clipboard."""
    clipboard = Clipboard()
    # Empty the clipboard first so a failed cut is detectable as ''.
    clipboard.set_system_text('')
    keystrokes = 'cs-left/3:%s/10, c-x/10' % wordCount
    Key(keystrokes).execute()
    return clipboard.get_system_text()
[ "def moveCursorWordLeft(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDLEFT)", "def moveLeftOneWordPart(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDPARTLEFT)", "def extendSelectionLeftOneWord(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDLEFTEXTEND)", "def extendSelecti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a plot of the specified data file and sets the ThumbnailPanel's bitmap accordingly
def plot_thumb(self, data_fname):
    """Generate a plot thumbnail of the given data file and set this
    panel's bitmap; fall back to a blank plot when no thumbnail results."""
    thumbnail = self.controller.plot_thumb(
        data_fname, self.bitmap_width, self.bitmap_height)
    if thumbnail is None:
        self.plot_blank()
    else:
        self.figure_bmp.SetBitmap(thumbnail)
[ "def filePlot(self):\n if len(self.filename) == 0:\n self.filename = QtGui.QFileDialog.getOpenFileName(None, \\\n \"Open Data File (csv)\", \".\", \"*.csv\")\n \n self._readCSV(self.filename)\n \n self.main_widget = QtGui.QWidget(self)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to invoke Disable command on SDP Master.
def do(self): this_server = TangoServerHelper.get_instance() try: sdp_master_ln_fqdn = "" property_val = this_server.read_property("SdpMasterFQDN")[0] sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val) sdp_mln_client_obj = TangoClient(sdp_master_ln...
[ "def disable(self):\n\n return self.conn.request(\"POST\", \"/%s/disable\" % self.uuid)", "def disable_mems(self):\n off = 'D001'\n self.send_command(off)\n print('MEMS DISABLE')", "async def disable(self, ctx, *, command: str):\r\n\t\tcommand = command.lower()\r\n\t\tif command in (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether the current instance is an edge server in cross-silo FL.
def is_edge_server() -> bool:
    """Returns whether the current instance is an edge server in cross-silo FL."""
    port = Config().args.port
    return port is not None
[ "def is_edge_site(self) -> bool:\n return self.config.edge", "def is_cross(self):\n\n # Cross-zone edge has two neighbour faces from different zones.\n\n faces_count = len(self.Faces)\n\n if faces_count == 1:\n return False\n elif faces_count == 2:\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether the current instance is a central server in cross-silo FL.
def is_central_server() -> bool:
    """Returns whether the current instance is a central server in cross-silo FL."""
    # Central role requires cross-silo config present and no port assigned.
    if not hasattr(Config().algorithm, 'cross_silo'):
        return False
    return Config().args.port is None
[ "def is_server(self):\n return self._role_maker._is_server()", "def isClientHost(self):\n return self.serverThread is not None", "def server(self) -> bool:\n return pulumi.get(self, \"server\")", "def is_server(self):\n return self.device_type == \"server\" or self.phy.device_type ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the device to be used for training.
def device() -> str: import torch if torch.cuda.is_available() and torch.cuda.device_count() > 0: if hasattr(Config().trainer, 'parallelized') and Config().trainer.parallelized: device = 'cuda' else: device = 'cuda:' + str( ...
[ "def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")", "def device(self) -> th.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")", "def get_device():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the hardware and OS support data parallelism.
def is_parallel() -> bool:
    """Check if the hardware and OS support data parallelism."""
    import torch
    if not hasattr(Config().trainer, 'parallelized'):
        return False
    # Parallel training needs the config flag plus CUDA with >1 device
    # and a distributed backend.
    return (Config().trainer.parallelized
            and torch.cuda.is_available()
            and torch.distributed.is_available()
            and torch.cuda.device_count() > 1)
[ "def is_available():\n return torch._C.has_openmp", "def model_parallel_is_initialized():\n if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:\n return False\n return True", "def has_external_pooling():\n\n\treturn False", "def is_multiprocess_available(self) -> bool:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if the number_str can be truncated from both left and right and always be prime, e.g. 3797
def is_left_right_truncatable(number_str, prime_str_set):
    """Return True if number_str can be truncated from both left and right
    and always remain prime (e.g. 3797), judged by membership in
    prime_str_set (a set of prime numbers as strings)."""
    length = len(number_str)
    return all(
        number_str[i:] in prime_str_set and number_str[:length - i] in prime_str_set
        for i in range(length)
    )
[ "def is_truncatable(nb):\n nb = str(nb)\n if is_prime(int(nb)):\n for i in range(1, len(nb)):\n if not is_prime(int(nb[i:])) or not is_prime(int(nb[:len(nb)-i])):\n return False\n return True\n else:\n return False", "def substring_divisible(number):\n st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine fixed modifications in case the reference shift is at zero. Does not need localization.
def determine_fixed_mods_zero(aastat_result, data, params_dict): fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh'] min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor'] fix_mod_dict = {} reference = utils.mass_format(0) aa_rel = aastat_result[reference][2] utils.inte...
[ "def _only_fixed(o, d):\n if d[\"fixed\"]:\n return (\"value\", \"fixed\")\n else:\n return (\"fixed\",)", "def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds", "def is_fixed(self):\n return False", "def shift_detection_conv(signa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Compute the Einstein radius for a given isotropic velocity dispersion assuming a singular isothermal sphere (SIS) mass profile
def approximate_theta_E_for_SIS(vel_disp_iso, z_lens, z_src, cosmo):
    r"""Compute the Einstein radius for a given isotropic velocity
    dispersion assuming a singular isothermal sphere (SIS) mass profile."""
    lens_cosmo = LensCosmo(z_lens, z_src, cosmo=cosmo)
    return lens_cosmo.sis_sigma_v2theta_E(vel_disp_iso)
[ "def Wigner_Seitz_radius(n: u.m**-3) -> u.m:\n return (3 / (4 * np.pi * n)) ** (1 / 3)", "def _calculate_residual_sphere(parameters, x_values, y_values, z_values):\n #extract the parameters\n x_centre, y_centre, z_centre, radius = parameters\n\n #use numpy's sqrt function here, which works by element ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the slope and intercept values fit on a sample of ETGs Note The slope and intercept were read off from Fig 7 of [1]_. Values binned by magnitudes are available in [2]_. References
def _define_ETG_fit_params(self): self.slope = 2.0 self.intercept = 5.8
[ "def setSlopeAndIntercept(self, slope, intercept):\n\t\tself.setSlope(slope)\n\t\tself.setIntercept(intercept)\n\t\tself.updatePreview()", "def set_fit_intercept(self, new_fit_intercept=True):\n self.fit_intercept = new_fit_intercept", "def setSlope(self, slope):\n\t\tself.slope = slope", "def fit_slop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate the Vband luminosity L_V expected from the FJ relation for a given velocity dispersion
def get_luminosity(self, vel_disp):
    """Evaluate the log V-band luminosity expected from the Faber-Jackson
    relation for a given velocity dispersion (linear in log10 vel_disp)."""
    log_vel_disp = np.log10(vel_disp)
    return self.slope * log_vel_disp + self.intercept
[ "def fwd_voltage(self):\r\n\t\treturn float(self.query(\"LAS:LDV?\"))", "def estimate_velocity_vring_collider(sl, sv):\n print('... estimate_velocity_vring_collider: Only applicable to the piston setting1 (~10cm blob creation)')\n # get module location\n mod_loc = os.path.abspath(__file__)\n pdir, fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the parameters fit on SDSS DR4 Note The values of slope and intercept are taken from the r-band orthogonal fit on SDSS DR4. See Table 2 of [1]_. References .. [1] Hyde, Joseph B., and Mariangela Bernardi. "The luminosity and stellar mass Fundamental Plane of early-type galaxies."
def _define_SDSS_fit_params(self): self.a = 1.4335 self.b = 0.3150 self.c = -8.8979 self.intrinsic_scatter = 0.0578 #self.delta_a = 0.02 #self.delta_b = 0.01
[ "def _define_SDSS_fit_params(self):\n\t\tself.a = 5.7*1.e-4\n\t\tself.b = 0.38\n\t\tself.lower = 0.2", "def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the parameters fit on the Sloan Lens Arcs Survey (SLACS) sample of 73 ETGs Note See Table 4 of [1]_ for the fit values, taken from the empirical correlation derived from the SLACS lens galaxy sample. References
def _define_SLACS_fit_params(self): # Fit params from R_eff self.a = -0.41 self.b = 0.39 #self.delta_a = 0.12 #self.delta_b = 0.10 self.intrinsic_scatter = 0.14 # Fit params from vel_disp self.a_v = 0.07 self.b_v = -0.12 self.int_v = 0.17
[ "def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01", "def _define_SDSS_fit_params(self):\n\t\tself.a = 5.7*1.e-4\n\t\tself.b = 0.38\n\t\tself.lower = 0.2", "def set_fit_para...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the parameters fit on the SDSS data Note The shape of the distribution arises because more massive galaxies are closer to spherical than less massive ones. The truncation excludes highly flattened profiles. The default fit values have been derived by [1]_ from the SDSS data. References
def _define_SDSS_fit_params(self): self.a = 5.7*1.e-4 self.b = 0.38 self.lower = 0.2
[ "def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01", "def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sample (one minus) the axis ratio of the lens galaxy from the Rayleigh distribution with scale that depends on velocity dispersion
def get_axis_ratio(self, vel_disp):
    """Sample (one minus) the axis ratio of the lens galaxy from the
    Rayleigh distribution with a scale that depends on velocity dispersion.

    Rejection-samples until the ratio clears ``self.lower``, excluding
    highly flattened profiles.
    """
    rayleigh_scale = self.a * vel_disp + self.b
    q = 0.0
    while q < self.lower:
        q = 1.0 - np.random.rayleigh(rayleigh_scale, size=None)
    return q
[ "def scale(self):\n return Vector([self.axis.mag(), self.height, self.width]) * 0.5", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def downsample_ratio(self):\n return self.resolution / self.mip_resolution(0)", "def sphere_volume(r)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Set the parameters fit on the combined sample of more than 80,000 color-selected AGN from ~14 datasets Note
def _define_combined_fit_params(self): self.z_bins = np.array([0.40, 0.60, 0.80, 1.00, 1.20, 1.40, 1.60, 1.80, 2.20, 2.40, 2.50, 2.60, 2.70, 2.80, 2.90, 3.00, 3.10, 3.20, 3.30, 3.40, 3.50, 4.10, 4.70, 5.50, np.inf]) ...
[ "def fit_bma(self):\n if len(self.star.filter_names[self.star.filter_mask]) <= 5:\n print(colored('\\t\\t\\tNOT ENOUGH POINTS TO MAKE THE FIT! !', 'red'))\n return\n thr = self._threads if self._sequential else len(self._interpolators)\n # display('Bayesian Model Averaging...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sample the AGN luminosity from the redshift-binned luminosity function
def sample_agn_luminosity(self, z): # Assign redshift bin is_less_than_right_edge = (z < self.z_bins) alpha = self.alphas[is_less_than_right_edge][0] beta = self.betas[is_less_than_right_edge][0] M_star = self.M_stars[is_less_than_right_edge][0] # Evaluate function pmf = self.get_double_power_law(alpha, ...
[ "def compute_luminosity(red, green, blue):\r\n return (0.299 * red) + (0.587 * green) + (0.114 * blue)", "def compute_radiocore_luminosity(MBH, L_AGN):\n\tL_X = bolcorr_hardX(L_AGN)\n\tm = log10(MBH / u.Msun)\n\t# Merloni, Heinz & Di Matteo (2003)\n\tlogLR = 0.6 * log10(L_X/(u.erg/u.s)) + 0.78 * m + 7.33\n\tre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
expects 2 arrays of shape (3, N) rigid transform algorithm from
def rigid_transform_3d(xs,ys): assert xs.shape == ys.shape assert xs.shape[0] == 3, 'The points must be of dimmensionality 3' # find centroids and H x_centroid = np.mean(xs, axis=1)[:, np.newaxis] y_centroid = np.mean(ys, axis=1)[:, np.newaxis] H = (xs - x_centroid)@(ys - y_centroid).T ...
[ "def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3", "def get_transformation(k: np.ndarray, r: np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synchronize this instance data with that of its parent
def _syncDataWithParent(self): parent = self.parent() if parent is None: data, range_ = None, None else: data = parent.getData(copy=False) range_ = parent.getDataRange() self._updateData(data, range_)
[ "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()", "def _synchronize(self, obj, child, associationrow, clearkeys):\n raise NotI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle data change in the parent this plane belongs to
def _parentChanged(self, event):
    """Handle data change in the parent this plane belongs to."""
    if event != ItemChangedType.DATA:
        return
    self._syncDataWithParent()
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether values <= colormap min are displayed or not.
def getDisplayValuesBelowMin(self):
    """Return whether values <= the colormap minimum are displayed or not."""
    colormap = self._getPlane().colormap
    return colormap.displayValuesBelowMin
[ "def lowresdisplay():\n w, h = getscreengeom()\n return w < 1400 or h < 700", "def inHorizontalWindow(self, pc):\n return pc.miny() < self.maxy() and self.miny() < pc.maxy()", "def setDisplayValuesBelowMin(self, display):\n display = bool(display)\n if display != self.getDisplayValues...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set whether to display values <= colormap min.
def setDisplayValuesBelowMin(self, display):
    """Set whether to display values <= the colormap minimum.

    Updates the colormap and emits an ALPHA changed event only when the
    flag actually changes.
    """
    display = bool(display)
    if display == self.getDisplayValuesBelowMin():
        return
    self._getPlane().colormap.displayValuesBelowMin = display
    self._updated(ItemChangedType.ALPHA)
[ "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synchronize this instance data with that of its parent
def _syncDataWithParent(self): parent = self.parent() if parent is None: self._data = None else: self._data = parent.getData(copy=False) self._updateScenePrimitive()
[ "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)", "def _synchronize(self, obj, child,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the level of this isosurface (float)
def getLevel(self):
    """Return the level of this isosurface (float)."""
    return self._level
[ "def get_level(self):\r\n \r\n return self.level", "def get_level(self):\r\n \r\n return self.level", "def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)", "def get_level(self):\n return self.playerLevel", "def getLevel(self):\n return _libsbml...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value at which to build the isosurface. Setting this value reset autolevel function
def setLevel(self, level):
    """Set the value at which to build the isosurface.

    Setting a level always resets the auto-level function; the scene is
    rebuilt and an ISO_LEVEL event emitted only when the level changes.
    """
    self._autoLevelFunction = None
    level = float(level)
    if level == self._level:
        return
    self._level = level
    self._updateScenePrimitive()
    self._updated(Item3DChangedType.ISO_LEVEL)
[ "def set(self, value):\n\n if value == self.Value.kOff:\n hal.setSolenoid(self.forwardHandle, False)\n hal.setSolenoid(self.reverseHandle, False)\n elif value == self.Value.kForward:\n hal.setSolenoid(self.reverseHandle, False)\n hal.setSolenoid(self.forward...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the function computing the isolevel (callable or None)
def getAutoLevelFunction(self):
    """Return the function computing the isolevel (callable or None)."""
    return self._autoLevelFunction
[ "def _get_isis_level(self):\n return self.__isis_level", "def elevation_level():\n return F.udf(lambda x: str(int(x/1000)*1000))", "def _DEFAULT_FUNC_(t):\n\treturn 9.1", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the color of this isosurface (QColor)
def getColor(self):
    """Return the color of this isosurface (QColor)."""
    components = self._color
    return qt.QColor.fromRgbF(*components)
[ "def get_color(self):\n return(self.pen_color)", "def get_color(self):\n return self.color", "def color(self):\n return idc.get_color(self.ea, idc.CIC_ITEM)", "def _get_color(self): # pragma: no cover\n return QColorDialog(self).getColor(initial=QColor(\"black\"))", "def color(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute isosurface for current state.
def _computeIsosurface(self): data = self.getData(copy=False) if data is None: if self.isAutoLevel(): self._level = float('nan') else: if self.isAutoLevel(): st = time.time() try: level = float(self.get...
[ "def isosurface(self):\n return self._isosurface()", "def isosurface(self, value=None, flying_edges=True):\n scrange = self._data.GetScalarRange()\n\n if flying_edges:\n cf = vtk.vtkFlyingEdges3D()\n cf.InterpolateAttributesOn()\n else:\n cf = vtk.vtkCo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute range info (min, min positive, max) from data
def _computeRangeFromData(data): if data is None: return None dataRange = min_max(data, min_positive=True, finite=True) if dataRange.minimum is None: # Only non-finite data return None if dataRange is not None: min_positive = dataRange.min_positive ...
[ "def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value", "def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range", "def get_range_parameters(data):\n return data.star...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an isosurface to this item.
def addIsosurface(self, level, color): isosurface = self._Isosurface(parent=self) isosurface.setColor(color) if callable(level): isosurface.setAutoLevelFunction(level) else: isosurface.setLevel(level) isosurface.sigItemChanged.connect(self._isosurfaceItemC...
[ "def add_surface(self,s):\n self.surfaces.append(s)\n s.system=self.surfaces", "def add_surface_item(self, surfaceitem):\n self._add_surface_item(surfaceitem)", "def isosurface(self, value=None, flying_edges=True):\n scrange = self._data.GetScalarRange()\n\n if flying_edges:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove an isosurface from this item.
def removeIsosurface(self, isosurface): if isosurface not in self.getIsosurfaces(): _logger.warning( "Try to remove isosurface that is not in the list: %s", str(isosurface)) else: isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged) ...
[ "def remove(self):\n\n self.remove_layer()\n self.remove_geo_resources()", "def remove_surface(self):\n if len(self.contours)>0:\n for contour in self.contours:\n if isinstance(contour,ContourSet):\n for lineset in contour.collections:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle update of isosurfaces upon level changed
def _isosurfaceItemChanged(self, event): if event == Item3DChangedType.ISO_LEVEL: self._updateIsosurfaces()
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle updates of isosurfaces level and add/remove
def _updateIsosurfaces(self): # Sorting using minus, this supposes data 'object' to be max values sortedIso = sorted(self.getIsosurfaces(), key=lambda isosurface: - isosurface.getLevel()) self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]
[ "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle update of the cut plane (and take care of mode change
def _updated(self, event=None): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() super(ComplexCutPlane, self)._updated(event)
[ "def plane_update(self):\n self.plane.update()", "def vp_update_after_active_tile_selection(self):\n self.ovm.update_all_debris_detections_areas(self.gm)\n self.main_controls_trigger.transmit('SHOW CURRENT SETTINGS')\n self.vp_draw()", "def onUpdateFactors(self, evt):\n\t\tif self.bl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle data change in the parent this isosurface belongs to
def _parentChanged(self, event): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() super(ComplexIsosurface, self)._parentChanged(event)
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle update of the isosurface (and take care of mode change)
def _updated(self, event=None): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() elif event in (ItemChangedType.COLORMAP, Item3DChangedType.INTERPOLATION): self._updateScenePrimitive() super(ComplexIsosurface, self)._updated(eve...
[ "def updateSurface(self):\n \n pass", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def toggle_surface(self):", "def update(self):\n pygame.surfarray.blit_array(self.surface, self.array2d)", "def update_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return 3D dataset. This method does not cache data converted to a specific mode, it computes it for each request.
def getData(self, copy=True, mode=None): if mode is None: return super(ComplexField3D, self).getData(copy=copy) else: return self._convertComplexData(self._data, mode)
[ "def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]", "def get_dataset(self, cid, type=\"train\"):\n dataset = torch.load(\n os.path.join(self.path, type, \"data{}.pkl\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Population prior, i.e. $Categorical(\pi)$.
def prior_z(self) -> distributions.Distribution: return distributions.Categorical(self.pi)
[ "def test_prior_name(self):\n dim = Dimension(\"yolo\", \"reciprocal\", 1e-10, 1)\n assert dim.prior_name == \"reciprocal\"\n\n dim = Dimension(\"yolo\", \"norm\", 0.9)\n assert dim.prior_name == \"norm\"\n\n dim = Real(\"yolo\", \"uniform\", 1, 2)\n assert dim.prior_name =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test vertex_areas. Vertex area is the area of all of the triangles who are in contact
def test_vertex_areas(self, faces, point): number_of_contact_faces = gs.array([3, 5, 5, 5, 5, 5, 3, 5]) triangle_area = 0.5 * 2 * 2 expected = 2 * (number_of_contact_faces * triangle_area) / 3 space = self.Space(faces) result = space.vertex_areas(point) assert result.sha...
[ "def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test normals. We test this on a space whose initializing point is a cube, and we test the function on a cube with sides of length 2 centered at the origin. The cube is meshed with 12 triangles (2 triangles per face.) Recall that the magnitude of each normal vector is equal to the area of the face it is normal to.
def test_normals(self, faces, point): space = self.Space(faces=faces) cube_normals = gs.array( [ [0.0, 0.0, 2.0], [0.0, 0.0, 2.0], [0.0, 2.0, 0.0], [0.0, 2.0, 0.0], [2.0, 0.0, 0.0], [2.0, 0.0, 0.0...
[ "def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test surface metric matrices.
def test_surface_metric_matrices(self, faces, point): space = self.Space(faces=faces) result = space.surface_metric_matrices(point=point) assert result.shape == ( space.n_faces, 2, 2, ), result.shape point = gs.array([point, point]) re...
[ "def surface_test(xgrid, ygrid):\n\txfactor = 2*numpy.pi/20\n\tyfactor = 2*numpy.pi/11\n\treturn numpy.sin(xgrid*xfactor) * numpy.cos(ygrid*yfactor)", "def test_surfacegrid():\n elem = omf.surface.TensorGridSurface()\n elem.tensor_u = [1.0, 1.0]\n elem.tensor_v = [2.0, 2.0, 2.0]\n assert elem.validate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that energy of a path of surfaces is positive at each timestep.
def test_path_energy_per_time_is_positive( self, space, a0, a1, b1, c1, d1, a2, path, atol ): n_times = len(path) space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2) energy = space.metric.path_energy_per_time(path) self.assertAllEqual(energy.shape...
[ "def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol):\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, ())\n result = gs.all(energy > -1 * atol)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that energy of a path of surfaces is positive at each timestep.
def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol): space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2) energy = space.metric.path_energy(path) self.assertAllEqual(energy.shape, ()) result = gs.all(energy > -1 * atol) sel...
[ "def test_path_energy_per_time_is_positive(\n self, space, a0, a1, b1, c1, d1, a2, path, atol\n ):\n n_times = len(path)\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy_per_time(path)\n\n self.assertAllEqual...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
goes through each neuron, each neuron has a chance of mutating equal to the learning rate of the network. There is a 20% chance of a physical mutation.
def mutate(self): #First, mutate masses for neuronNum in range(self.neuronCounter - 1): if self.learningRate > random.random(): self.neurons[neuronNum].mutate() else: continue #Now determine physical mutations if random.random() < ...
[ "def weight_mutate(self):\n\n starting_pol = int(self.n_elites)\n while starting_pol < self.pop_size:\n # Output bias weights\n for w in range(self.n_outputs):\n rnum = random.uniform(0, 1)\n if rnum <= self.mut_chance:\n weight = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a CLOUD device has already been added.
def is_cloud_device_already_added(self): for entry in self._async_current_entries(): if entry.unique_id is not None and entry.unique_id == f"{DOMAIN}Cloud": return True return False
[ "def has_device(self, device_key):\r\n return self._devices.has_key(device_key)", "def devicename_exists(devicename):\n # Initialize key variables\n exists = False\n\n # Get information on agent from database\n data = GetDevice(devicename)\n if data.exists() is True:\n exists = True\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the IMDB reviews dataset. Code adapted from the code for
def load_imdb_dataset(): (x_train, y_train), (x_test, y_test) = imdb.load_data( path="./datasets", num_words=_IMDB_CONFIG["max_features"]) num_train = _IMDB_CONFIG["num_train"] x_train, x_val = x_train[:num_train], x_train[num_train:] y_train, y_val = y_train[:num_train], y_train[num_train:] def prepro...
[ "def load_dataset():\n with open(\"../openreview-dataset/results/authors.json\", \"r\") as f:\n authors = json.load(f)\n\n with open(\"../openreview-dataset/results/papers.json\", \"r\") as f:\n papers = json.load(f)\n\n with open(\"../openreview-dataset/results/reviews.json\", \"r\") as f:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2.
def _parse_uci_regression_dataset(name_str): pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)" pattern = re.compile(pattern_string) matched = pattern.match(name_str) if matched: name = matched.group("name") seed = matched.group("seed") return name, seed return None, None
[ "def get_uci_data(name) -> Tuple[chex.Array, chex.Array]:\n spec = DATA_SPECS.get(name)\n if spec is None:\n raise ValueError('Unknown dataset: {}. Available datasets:\\n{}'.format(\n name, '\\n'.join(DATA_SPECS.keys())))\n with tf.io.gfile.GFile(spec.path) as f:\n df = pd.read_csv(f)\n labels = df...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reshapes batch to have first axes size equal n_split.
def batch_split_axis(batch, n_split): x, y = batch n = x.shape[0] n_new = n / n_split assert n_new == int(n_new), ( "First axis cannot be split: batch dimension was {} when " "n_split was {}.".format(x.shape[0], n_split)) n_new = int(n_new) return tuple(arr.reshape([n_split, n_new, *arr.shape[1:...
[ "def split_last_dimension(x, n):\n x_shape = common_layers.shape_list(x)\n m = x_shape[-1]\n if isinstance(m, int) and isinstance(n, int):\n assert m % n == 0\n return tf.reshape(x, x_shape[:-1] + [n, m // n])", "def _reshape_to_batchsize(im):\n sequence_ims = tf.split(im, num_or_size_splits=sequences_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate through the spike waveforms belonging in the current trace view.
def _iter_spike_waveforms( interval=None, traces_interval=None, model=None, supervisor=None, n_samples_waveforms=None, get_best_channels=None, show_all_spikes=False): m = model p = supervisor sr = m.sample_rate a, b = m.spike_times.searchsorted(interval) s0, s1 = int(round(interval[0...
[ "def waveforms(self):\n return list(self._waveforms)", "def get_template_spike_waveforms(self, template_id):\n spike_ids = self.get_template_spikes(template_id)\n channel_ids = self.get_template_channels(template_id)\n return self.get_waveforms(spike_ids, channel_ids)", "def scan_wav...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Switch between top and bottom origin for the channels.
def switch_origin(self): self.origin = 'bottom' if self.origin == 'top' else 'top'
[ "def move_to_origin(self) -> None:\n\n _bb = self.bb()\n if _bb.x < 0:\n self.translate(abs(_bb.x), 0.0)\n else:\n self.translate(-abs(_bb.x), 0.0)\n\n if _bb.y < 0:\n self.translate(0.0, abs(_bb.y))\n else:\n self.translate(0.0, -abs(_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Half of the duration of the current interval.
def half_duration(self): if self._interval is not None: a, b = self._interval return (b - a) * .5 else: return self.interval_duration * .5
[ "def half_step_time(self):\n\n return self.full_step_time() * self.half_to_full_step_time_ratio", "def _get_half_time(self):\n return self.__half_time", "def half_life(self) -> u.s:\n return self._get_particle_attribute(\"half_life\", unit=u.s, default=np.nan * u.s)", "def Interval(self) -> f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go to a specific time (in seconds).
def go_to(self, time): half_dur = self.half_duration self.set_interval((time - half_dur, time + half_dur))
[ "def soak_time(self, soaktime):\n time.sleep(0.1)\n time_now = time.strftime(\"%H:%M\")\n print str(soaktime) + ' minute soak time starts @ ' + time_now \n time_in_sec = soaktime * 60\n time.sleep(time_in_sec)\n print 'Finished soaking'", "def sec_forward():\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shift the interval by a given delay (in seconds).
def shift(self, delay): self.go_to(self.time + delay)
[ "def shift(self, delay):\n self.__begin.shift(delay)\n self.__end.shift(delay)", "def delay(self, delay: int):\n if not delay >= 0:\n raise pyrado.ValueErr(given=delay, ge_constraint=\"0\")\n self._delay = round(delay) # round returns int", "def delay(seconds):\n\n # P...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go to end of the recording.
def go_to_end(self): self.go_to(self.duration)
[ "def endRecording(self):\r\n return self.vmrun('endRecording')", "def end_step(self):\n self.fh.end_step()", "def stop(self):\n self.recording = False", "def stop_recording(self):\n self.disarm()\n self._recorder.join()", "def end(self):\n self.my_print(\"\\t[DONE]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Jump to the next spike from the first selected cluster.
def go_to_next_spike(self, ): self._jump_to_spike(+1)
[ "def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)", "def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)", "def jump(self):\n self.lyrics = self.disk[self.index]", "def _next(self, _):\n self.notebook.SetS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Jump to the previous spike from the first selected cluster.
def go_to_previous_spike(self, ): self._jump_to_spike(-1)
[ "def go_to_next_spike(self, ):\n self._jump_to_spike(+1)", "def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def jumpBack(self):\n if self.currentTrajectory is None:\n return\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle between showing all spikes or selected spikes.
def toggle_highlighted_spikes(self, checked): self.show_all_spikes = checked self.set_interval()
[ "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle automatic scaling of the traces.
def toggle_auto_scale(self, checked): logger.debug("Set auto scale to %s.", checked) self.auto_scale = checked
[ "def ontogglescale(self, event):\n self._onToggleScale(event)\n try:\n # mpl >= 1.1.0\n self.figure.tight_layout()\n except:\n self.figure.subplots_adjust(left=0.1, bottom=0.1)\n try:\n self.figure.delaxes(self.figure.axes[1])\n except:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select a cluster by clicking on a spike.
def on_mouse_click(self, e): if 'Control' in e.modifiers: # Get mouse position in NDC. box_id, _ = self.canvas.stacked.box_map(e.pos) channel_id = np.nonzero(self.channel_y_ranks == box_id)[0] # Find the spike and cluster closest to the mouse. db = sel...
[ "def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)", "def selected_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overloading the addition operator for particles types
def __add__(self, other): if isinstance(other, type(self)): # always create new particles, since otherwise c = a + b changes a as well! p = particles(self) p.pos[:] = self.pos + other.pos p.vel[:] = self.vel + other.vel p.m = self.m p.q = ...
[ "def __add__(self, other):\n if not isinstance(other, Particle):\n return NotImplemented\n mnew = self.mass + other.mass\n vnew = (self.momentum() + other.momentum()) / mnew\n return Particle(mnew, vnew)", "def scalar_add(self, other: Numeric) -> \"Price\":", "def __add__(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overloading the subtraction operator for particles types
def __sub__(self, other): if isinstance(other, type(self)): # always create new particles, since otherwise c = a - b changes a as well! p = particles(self) p.pos[:] = self.pos - other.pos p.vel[:] = self.vel - other.vel p.m = self.m p.q = ...
[ "def scalar_subtract(self, other: Numeric) -> \"Price\":", "def __sub__(self, other):\n if type(other) in (types.IntType, types.FloatType):\n return self.__add__(-other)\n return self.totalSeconds() - other.totalSeconds()", "def __sub__(self, other: Any) -> Union[Var, AdditionPart]: # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overloading the addition operator for fields types
def __add__(self, other): if isinstance(other, type(self)): # always create new fields, since otherwise c = a - b changes a as well! p = fields(self) p.elec[:] = self.elec + other.elec p.magn[:] = self.magn + other.magn return p else: ...
[ "def __add__(self, other):\n if isinstance(other, NXfield):\n return NXfield(value=self.nxdata+other.nxdata, name=self.nxname,\n attrs=self.attrs)\n else:\n return NXfield(value=self.nxdata+other, name=self.nxname,\n attrs=self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overloading the subtraction operator for fields types
def __sub__(self, other): if isinstance(other, type(self)): # always create new fields, since otherwise c = a - b changes a as well! p = fields(self) p.elec[:] = self.elec - other.elec p.magn[:] = self.magn - other.magn return p else: ...
[ "def scalar_subtract(self, other: Numeric) -> \"Price\":", "def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PSNR between two images
def _psnr(img1, img2): mse = np.mean((img1 - img2) ** 2) if mse == 0: return 100 PIXEL_MAX = 1 return (20 * math.log10(PIXEL_MAX)) - (10 * math.log10(mse))
[ "def _comput_PSNR(self, imgs1, imgs2):\n N = imgs1.size()[0]\n imdiff = imgs1 - imgs2\n imdiff = imdiff.view(N, -1)\n rmse = torch.sqrt(torch.mean(imdiff**2, dim=1))\n psnr = 20*torch.log(255/rmse)/math.log(10) # psnr = 20*log10(255/rmse)\n psnr = torch.sum(psnr)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The names of the roles performed by the model. This is required by QtQuick
def roleNames(self): return self._roles
[ "def roles(self):\n return self.m_roles", "def object_role_names(self):\n return (object_role.name for object_role in self.object_roles)", "def roles(self):\n return list(self.roleNamed.values())", "def object_role_names(self):\n return [object_role.name for object_role in self.obj...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The outline of the command used to perform a horizontal run
def horizontalCommand(self): return self._horizontal_command
[ "def print_horizontal_rule():\n\n print \"******************************************\"", "def print_prologue(self, command_name: str, argv: List[str]) -> None:\n # command_text = Text(f\"{command_name}\")\n # if len(argv) > 1:\n # command_text.append(f\" {' '.join(argv[1:])}\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The outline of the command used to perform a vertical run
def verticalCommand(self): return self._vertical_command
[ "def vertical_line(t, n):\n lt(t)\n fd(t,n)\n rt(t)", "def inner_vertical(self):\n raise NotImplementedError()", "def show(cmd, *args, **argv):\n \n context = argv[\"context\"]\n \n commands = context.commands\n context.resolver.import_module(context.commands)\n from _pretty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The current number of runs
def count(self): return len(self._runs)
[ "def get_runs(self) -> int:", "def run_count(self) -> int:\n return self._run_count", "def run_count(self):\n return self._run_count", "def increment_run_count(self):\n self.run_count += 1", "def next_run_idx(self):\n return self.num_runs", "def get_num_of_executed_iters(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform a 5 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask5. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of five cons...
def applyWindow5years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-3): img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)])) img_out = img_out.addBands(imagem.select(bandNames[-3])) img_out = img_out.addBands(imagem.selec...
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform a 4 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask4. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of four cons...
def applyWindow4years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-2): img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)])) img_out = img_out.addBands(imagem.select(bandNames[-2])) img_out = img_out.addBands(imagem.selec...
[ "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform a 3 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask3. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three con...
def applyWindow3years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-1): img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper function to perform a spatial filter based on connectedPixelCount for one land cover class value. Spatial filter was applied to avoid unwanted modifications to the edges of the pixel groups (blobs), a spatial filter was built based on the "connectedPixelCount" function. Native to the GEE platform, this functio...
def majorityFilter(image, params): params = ee.Dictionary(params) minSize = ee.Number(params.get('minSize')) classValue = ee.Number(params.get('classValue')) #Generate a mask from the class value classMask = image.eq(classValue) #Labeling the group of pixels until 100 pixels connected ...
[ "def count_neighbours(self, mask):\n from scipy.ndimage.filters import convolve\n\n mask = mask.astype('uint8')\n filter_args = {'mode': self.boundary}\n if self.boundary == 'empty':\n filter_args['mode'] = 'constant'\n filter_args['cval'] = 0\n elif self.bou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform a spatial filter based on connectedPixelCount for land cover class values defined in filterParams. Calls the function majorityFilter. Spatial filter was applied to avoid unwanted modifications to the edges of the pixel groups (blobs), a spatial filter was built based on the "connectedPixelCount" fun...
def applySpatialFilter(image,filterParams): #Loop through list of parameters and apply spatial filter using majorityFilter for params in filterParams: image = majorityFilter(ee.Image(image),params) return image
[ "def majorityFilter(image, params):\n params = ee.Dictionary(params)\n minSize = ee.Number(params.get('minSize'))\n classValue = ee.Number(params.get('classValue'))\n \n #Generate a mask from the class value\n classMask = image.eq(classValue)\n \n #Labeling the group of pixels until 100 pixe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform a forward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The forward gap fill is applied iteratively from the first year of bandNames through the final year, where if the current image has missing data...
def applyForwardNoDataFilter(image, bandNames): #Get a list of band names from year(1) through the last year bandNamesEE = ee.List(bandNames[1:]) #Define forwards filter #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first ...
[ "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]),...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform a backward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The backward gap fill is applied iteratively from the last year of bandNames through the first year, where if the current image has missing dat...
def applyBackwardNoDataFilter(image, bandNames): #Get a list of band names to iterate over, from year(-2) through year(0) bandNamesEE = ee.List(bandNames[:-1]).reverse() #Define backwards filter #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the cl...
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to apply forward gap filling and backward gap filling to an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. This funciton calls applyForwardNoDataFilter then applyBackwardNoDataFilter
def applyGapFilter(image, bandNames): filtered = applyForwardNoDataFilter(image, bandNames) filtered = applyBackwardNoDataFilter(filtered, bandNames) return filtered
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to apply an incidence filter. The incidence filter finds all pixels that changed more than numChangesCutoff times and is connected to less than connectedPixelCutoff pixels, then replaces those pixels with the MODE value of that given pixel position in the stack of years.
def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6): #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff num_changes = calculateNumberOfChanges(image, bandNames) too_many_changes = nu...
[ "def apply(self, src, dst):\n cv2.filter2D(src, -1, self._kernel, dst)", "def applyInterpixCrosstalk(self):\n import copy\n ICTcoeff = np.array([[self.information['c1'], self.information['c2'],self.information['c3']],[self.information['c4'],self.information['c5'],self.information['c6']],[self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to apply an frequency filter. This filter takes into consideration the occurrence frequency throughout the entire time series. Thus, all class occurrence with less than given percentage of temporal persistence (eg. 3 years or fewer out of 33) are replaced with the MODE value of that given pixel position in the...
def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): #Grab land cover classes as a list of strings lc_classes = classDictionary.keys().getInfo() #Get binary images of the land cover classifications for the current year binary_class_images = npv.convertClassificationsTo...
[ "def frequency_filter(fc, L, srf, KIND=2):\n\n if hasattr(KIND, \"__len__\"):\n PASS = KIND\n KIND = 2\n else:\n PASS = [2,3]\n KIND = [KIND]\n\n # fourier transform of lateral inhibitory function \n\n # tonotopic axis\n if issubclass(type(fc), str):\n fc = float(fc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to apply a probability filter to land cover probabilities in each image of imageCollection. The user defines which classes will be filtered and how to filter them in the params list. The params list is a list of dictionaries, one for each class the user wants to filter.
def applyProbabilityCutoffs(imageCollection, params): #Define function to map across imageCollection def probabilityFilter(image): #Get the classifications from the class with the highest probability classifications = npv.probabilityToClassification(image) ...
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassif...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of features in the processed data. Returns int Feature size.
def get_num_features(self): return len(self[0]['x'])
[ "def get_number_of_features(self):\n return len(self.__features)", "def num_features(self) -> int:\n return self._num_features", "def n_features(self):\n return len(self.features_list)", "def number_of_features(self):\n return len(self.dv.feature_names_)", "def count_num_features...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }