query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
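Each row below is one training record with these four fields. As a minimal sketch of how such records could be iterated (this assumes a JSON Lines export with one record per line; the file name triplets.jsonl and the helper iter_triplets are illustrative, not part of this dataset):

import json

def iter_triplets(path="triplets.jsonl"):
    # Assumed JSONL export: one record per line with the four fields listed above.
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            query = record["query"]          # natural-language description
            positive = record["document"]    # matching code snippet
            negatives = record["negatives"]  # 19-20 non-matching snippets
            yield query, positive, negatives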
Format warnings for printing. Returns a list of warning strings with indentation.
def toStringList( self, indent='', dIndent=' ' ): s = ['%s%s' % (indent, self.message)] for warning in self.warningList: s += warning.toStringList(indent + dIndent) return s
[ "def pretty_print(self, warnings=False):\n msg = []\n if (warnings) and (len(self.warnings) > 0):\n msg.append(u\"Warnings:\")\n for warning in self.warnings:\n msg.append(u\" %s\" % warning)\n if len(self.errors) > 0:\n msg.append(u\"Errors:\")\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the plot title, when the button is clicked, to the value present in the title input box
def setPlotTitle(self): plot_title = self.input_plot_title.text() if plot_title: self.plot_title = self.input_plot_title.text() # Redraw the plot with given title if not self.plot_inverted: self.drawPlot(self.data_x_axis, self.data_y_axis, self.label_x...
[ "def set_plot_title(self):\n plot_title = self.input_plot_title.text()\n if plot_title:\n self.plot_title = self.input_plot_title.text()\n # Redraw the plot with given title\n if not self.plot_inverted:\n self.draw_plot(self.data_x_axis, self.data_y_axis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the title of a widget
def get_widget_title(widget): if widget['title'] != '': return widget['title'] else: return widget['metadata']['panels'][0]['items'][0]['jaql']['title']
[ "def get_widget_title(*args):\n return _ida_kernwin.get_widget_title(*args)", "def title(self):\n return self.window.WindowText()", "def get_window_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def title(self):\n return self.figure.canvas.get_window_title(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the item on which the widget must be sorted, if any
def get_widget_sorted_item(widget): for panel in widget['metadata']['panels']: for item in panel['items']: if 'sort' in item['jaql']: return item['jaql']['title'] return None
[ "def item_comparer(self):\n return self.item_comparer_value", "def OnCompareItems(self, item1, item2):\r\n\r\n return cmp(self.GetItemText(item1), self.GetItemText(item2))", "def get_sort_key(self, item):\n return item.number", "def get_item_search_order(self):\n return # osid.asse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of widget IDs in the order of appearance from the layout object
def get_dashboard_ordered_widget_ids(dashfile_data): ordered_widget_ids = [] for column in dashfile_data['layout']['columns']: for cell in column['cells']: for subcell in cell['subcells']: for element in subcell['elements']: ordered_widget_ids.append(eleme...
[ "def layout_widgets(layout):\n\n return [layout.itemAt(i).widget() for i in range(layout.count())]", "def _layout_widgets(layout):\n all_widgets = []\n for i in range(layout.count()):\n item = layout.itemAt(i).widget()\n if type(item) is not QSpacerItem:\n all_widgets.append(item...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of widget titles present on the given dashboard, following the order defined in the dashboard layout
def get_ordered_titles(dashfile_data): ordered_widget_ids = get_dashboard_ordered_widget_ids(dashfile_data) kpis_id_title_mapping = {} charts_id_title_mapping = {} tables_id_title_mapping = {} kpi_titles = [] chart_titles = [] table_titles = [] # Create a local mapping between the widg...
[ "def get_dashboard_ordered_widget_ids(dashfile_data):\n ordered_widget_ids = []\n for column in dashfile_data['layout']['columns']:\n for cell in column['cells']:\n for subcell in cell['subcells']:\n for element in subcell['elements']:\n ordered_widget_ids.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the dashboard KPI mappings file. Add a dict object whose key is the dashboard name and whose values are the dashboard's KPI titles. This mapping is used by diff_kpis to name the KPIs that are compared.
def update_mappings(dashboard_data, mappings_file): logging.info('update_mappings') with open(mappings_file, 'r') as f: try: data = json.load(f) except ValueError: data = {} if dashboard_data.slug not in data: data[dashboard_data.slug] = {} table_data = [...
[ "def _create_yaml_map(self):", "def _update_database_map(self, path):\n if path:\n filename = path + '/APD_MAP.txt'\n else:\n filename = 'APD_MAP.txt'\n filepointer = open(filename, 'w')\n for invariom, molecule in self.map.items():\n filepointer.write(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds import and export functions to a class under the names export_data and import_data. This is currently unused and maybe should be removed. The original intention was to be able to add methods that provide JSON-compatible output dictionaries to external functions.
def add_base_class( existing_object: Any, import_method: Callable[[Any], Any], export_method: Callable[[Any], Any], ): existing_object.export_data = types.MethodType(export_method, existing_object) existing_object.import_data = types.MethodType(import_method, existing_object)
[ "def export(*args, **kwargs):\n\tdef wrapper(method):\n\t\tmethod.export_params = [ args, kwargs ]\n\t\treturn method\n\treturn wrapper", "def export_function(self, name, func):\n self._funcs[name] = func", "def addfunctions2new(abunch, key):\n snames = [\n \"BuildingSurface:Detailed\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A property for the location of any parameter files that can be used to build the skeleton of the bill of materials. Returns list A list of locations of the .json files that make up the parameters to be assembled into the skeleton. Raises ConfigurationNotFullyPopulated If the property is called but the configuration has not b...
def parameters(cls) -> list: if cls._parameters is None: msg = ( "location of any files which contain json parameters " "to be assembled required as a list of strings" ) run_log.error(msg) raise ConfigurationNotFullyPopulated(msg) ...
[ "def parts(cls) -> list:\n if cls._parts is None:\n msg = (\n \"location of any files which contain json parts\"\n \" to be assembled required as a list of strings\"\n )\n run_log.error(msg)\n raise ConfigurationNotFullyPopulated(msg)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A property for the locations of the .json files that will be loaded into the translator. Returns list Location of the translation file. Raises ConfigurationNotFullyPopulated If the property is called but the configuration has not been populated and the property is None.
def translations(cls) -> list: if cls._translations is None: msg = ( "translation location not defined, the file location" "required as a list of strings" ) run_log.error(msg) raise ConfigurationNotFullyPopulated(msg) return...
[ "def locations(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:\n return pulumi.get(self, \"locations\")", "def parts(cls) -> list:\n if cls._parts is None:\n msg = (\n \"location of any files which contain json parts\"\n \" to be assembled required ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The parts section of the configuration is used by the parsers to contain information about parts to be parsed to form the skeleton (dictionary form) of a Bill of Materials. Returns list The object assigned to parts. Raises ConfigurationNotFullyPopulated If the property is called but the configuration has not been popu...
def parts(cls) -> list: if cls._parts is None: msg = ( "location of any files which contain json parts" " to be assembled required as a list of strings" ) run_log.error(msg) raise ConfigurationNotFullyPopulated(msg) return c...
[ "def config(self):\n package = self.package\n if not hasattr(self, '_partconfig'):\n self._partconfig = {}\n\n if package not in self._partconfig:\n self._partconfig[package] = Parts(package, *self.parts)\n return self._partconfig[package]", "def _get_parts(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The working directory for the Configuration. Returns str Path to working directory.
def working_dir(cls) -> str: return cls._working_dir
[ "def get_working_dir(self):\n current_working_directory = os.getcwd()\n return current_working_directory", "def get_working_dir() -> str:\n return os.getcwd()", "def get_working_dir(self):\r\n return self.process.get_working_dir()", "def work_dir(self) -> Optional[str]:\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default parameter type that will be assigned to the params attribute of all components and assemblies. Returns str String path to the param type.
def default_param_type(cls) -> str: return cls._default_param_type
[ "def add_default_params(self):\r\n self.params = class_from_string(\r\n BaseFramework._configuration._default_param_type\r\n )()", "def param_type(self):\n return self._param_type", "def paramtype(self):\n return self._paramtype", "def get_param(params, key, defaults_typ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the temporary directory and also changes the log handler to write in this directory.
def temp_dir(cls, value: Union[str, Path]): start_message = ( f"Configuration Details\n\n{pprint.pformat(cls.to_dict(), indent=4)}" ) cls._temp_dir = value change_handler(f"{value}/run.log") run_log.info(start_message)
[ "def setTmpDir(self):\n\t\tif os.name != 'nt':\n\t\t\t# On unix use /tmp by default\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"/tmp\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\telse:\n\t\t\t# On Windows use the current directory\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"\")\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The plot directory for outputs. Returns str Path to the plot directory.
def plot_dir(cls) -> Union[str, Path]: if cls._plot_dir is None: msg = "plot_dir not supplied, defaulting to working_dir" run_log.warning(msg) return cls.working_dir else: return cls._plot_dir
[ "def graphs_directory():\n return os.path.join(output_directory(), \"graphs\")", "def get_output_path(self):\n return os.path.join(\"render-output\", str(self.id))", "def get_plot_filename(self) -> str:\n if self.url is None:\n return None\n return join(self.plot_output_dir, s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines the config file. The config can be loaded from a supplied dictionary or from a path. The intention in using a classmethod is that the config can be imported at any stage in a process after initialisation without reloading.
def define_config( cls, config_dict: dict = {}, config_path: Optional[Union[str, Path]] = None ): config = {} if config_path is not None: with open(Path(config_path), "r") as f: config = json.load(f) UpdateDict(config, config_dict) cls.update_confi...
[ "def init_config(cls):\n\n yaml = YAML(typ='safe', pure=True)\n config_dict = None\n config_filepath = os.getenv('HOME') + '/multi-agent-exploration/config/config.yaml'\n \n with open (config_filepath, 'r') as config_file:\n config_dict = yaml.load(config_file)\n\n for name, value in defaul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the config from given keyword arguments. As Config utilises classmethods, a classmethod is required to update it.
def update_config(cls, **kwargs): for key, val in kwargs.items(): setattr(cls, key, val)
[ "def update_args(config, args):\n for k, v in vars(args).items():\n if v is not None:\n config[k] = v\n print_dict_as_table(config, \"Received parameters form command line (or default):\")", "def update_configuration(**config_dict):\n global __configuration\n update_key(DIRECTORY,con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the configuration into a dictionary format. A check is used so that properties, instances and variables within the BaseClass are not output. Returns dict A dictionary containing all the variables specific to the class.
def to_dict(cls) -> dict: variables = dict() for key, val in vars(cls).items(): check = [ isinstance(val, property), isinstance(val, classmethod), key in vars(BaseClass).keys(), key == "_login_details", ] ...
[ "def get_full_configuration(self) -> dict:\n\n return {\n input_instance.key: input_instance.argument_value\n for input_instance in self.all_input_instances\n }", "def as_dict(self) -> Dict[str, Any]:\n\n configDict: Dict[str, Any] = {\n 'entities': self._enti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inputs login details. This method stores login details if they are required.
def input_login_details(cls, domain: str = ""): cls._login_details["username"] = str(input("username: ")) cls._login_details["password"] = str(getpass()) cls._login_details["domain"] = domain
[ "def login(self):\n self._username = input(\"Username:\")\n self._password = getpass.getpass(\"Password:\")", "def login():", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the login details update if any of the values are None. Returns dict The populated login details.
def login_details(cls) -> dict: if None in list(cls._login_details.values()): cls.input_login_details() return cls._login_details
[ "def input_login_details(cls, domain: str = \"\"):\n cls._login_details[\"username\"] = str(input(\"username: \"))\n cls._login_details[\"password\"] = str(getpass())\n cls._login_details[\"domain\"] = domain", "def login(self, details):\n if self._session_key is None:\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The variables in the data. Returns np.ndarray An array of the dataframe index, if it exists.
def vars(self) -> np.ndarray: if isinstance(self.data, pd.DataFrame) is False: return np.array([]) else: return np.array(self.data.index)
[ "def _get_index_array(self):\n table_index = self._parameter_root['tb_names'].value[0]\n nbr_points = len(self._tables[\n table_index][self._tables[table_index].dtype.names[0]])\n index_array = np.arange(0, nbr_points)\n return index_array", "def index(self, variables):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The column count in the wrapped dataframe. Returns int Integer if a dataframe exists. Note As the DataFrame is checked and mypy finds it to return Any, the output of the shape is also found to return Any.
def col_count(self): if isinstance(self.data, pd.DataFrame) is False: return None else: return self.data.shape[1]
[ "def missing_col_cnt(df: EDAFrame) -> Any:\n nulls = df.nulls\n rst = nulls.sum(0)\n rst = rst[rst > 0]\n\n return (rst > 0).sum()", "def count_columns(self):\n return len(self.columns)", "def columnCount(self, parent: QModelIndex = ...) -> int:\n if self.orientation == Qt.Horizontal:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compiles all dataframes for a given storage_str into a mutable top level dataframe.
def compile_all_df(self, assembly: Any, child_str: str): self.compiled = child_str storages = np.array( [ output[child_str] for key, output in assembly.lookup(child_str).items() if output[child_str] is not None and key != assemb...
[ "def compile_df(\n basepath: str,\n) -> pd.DataFrame:\n df = []\n\n basepaths = [p.parent for p in Path(basepath).expanduser().rglob(\"c2sts.csv\")]\n\n for basepath in tqdm(basepaths):\n row = {}\n\n # Read hydra config\n path_cfg = basepath / \".hydra/config.yaml\"\n if pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a function given the string formatted as `module_name.function_name` (e.g. `django.utils.text.capfirst`)
def import_function(s): a = s.split('.') j = lambda x: '.'.join(x) return getattr(import_module(j(a[:-1])), a[-1])
[ "def import_function(name: str):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)", "def import_function(name):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort the list of graph nodes according to their degree.
def sorted_nodes_list(self): full_sorted_node_list = map(lambda k: k[0], sorted(self.graph.degree(), key=lambda k: k[1], reverse=True)) return full_sorted_node_list
[ "def sort_by_energy_fraction(graph, nodes):\n sorted_nodes = []\n for node in nodes:\n sorted_nodes.append(graph.node[node])\n return sorted(sorted_nodes, key=lambda x: x['energy_fraction'])", "def nodes_sort(self):\n\n # sort face sets\n for (face_name, f_node_set) in self.faces.ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function that reads test_case info from a JSON file.
def __read_test_case(test_case): # type: (str) -> Optional[dict] with open('data/calculator.json') as json_file: data = json.load(json_file) return data[test_case] if data[test_case] else None
[ "def get_test_data():\n try:\n with open(DATA_FILE,'r') as json_in:\n j = json.load(json_in)\n return j\n except Exception as e:\n print \"error loading test JSON\",e\n sys.exit()", "def test_from_json(json_file: str):\n with open(json_file, \"r\") as file:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a DMA test result and append it to the results list.
def _process_dma_result(compute_node, testfunc, result, results_list, node): if result: logger.info( 'Test case for {0} with DMA PASSED on {1}.'.format( node, testfunc)) else: logger.error( 'Test case for {0} with DMA FAILED ...
[ "def _print_result_of_dma(compute_ids, results):\n compute_node_names = ['Node-{}'.format(i) for i in range(\n len((compute_ids)))]\n all_computes_in_line = ''\n for compute in compute_node_names:\n all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))\n line_of_nodes = '| Te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print results of DMA.
def _print_result_of_dma(compute_ids, results): compute_node_names = ['Node-{}'.format(i) for i in range( len((compute_ids)))] all_computes_in_line = '' for compute in compute_node_names: all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute))) line_of_nodes = '| Test ...
[ "def print_results(self):\n pass", "def print_results(self, results):\r\n\r\n for result in results:\r\n self.print_result(result)", "def print_queue(self):\n trav = self.queue\n while trav is not None:\n print \"%d \" % trav.data,\n trav = trav.next\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check DMA of each compute node.
def dma_main(bt_logger, conf, computes): global logger logger = bt_logger compute_ids = [] agent_results = [] for compute_node in computes: node_id = compute_node.get_id() compute_ids.append(node_id) agent_server_running = conf.is_dma_server_running(compute_node) ag...
[ "def _process_dma_result(compute_node, testfunc,\n result, results_list, node):\n if result:\n logger.info(\n 'Test case for {0} with DMA PASSED on {1}.'.format(\n node, testfunc))\n else:\n logger.error(\n 'Test case for {0} wit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return, for the current row of the books DataFrame, the rating of the first review.
def return_first_review_overall(x, review_books_df): asin = x.name return review_books_df.query('asin == @asin').sort_values(by=['unixReviewTime']).iloc[0].overall
[ "def get_first_row_analyzed_rate(self):\n return self.get_text_from_element(self.first_row_analyzed_rate_span_locator)", "def review_rating(review):\n return review[1]", "def rating(self):\n average = self.review.all().aggregate(Avg('rating'))['rating__avg']\n if not average:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return, for the current row of the matching DataFrame, a list of dictionaries containing the first n ratings attributed to each book.
def find_n_reviews(x, n, review_books_df): asin_1 = x['asin_1'] asin_2 = x['asin_2'] overall_reviews_1 = review_books_df.query('asin == @asin_1').sort_values( 'unixReviewTime').iloc[0:(n+1)].overall.tolist() overall_reviews_2 = review_books_df.query('asin == @asin_2').sort_values( 'unixReviewTi...
[ "def format_ratings(data_ratings: pd.DataFrame):\n\n #suppresion des doubons\n data_ratings.drop_duplicates(inplace=True)\n\n #suppresion des doubons\n data_ratings.dropna(inplace=True) \n\n data_ratings = data_ratings.rename(\n columns = {\n \"user_id\": \"user_id\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return, for the current row of the matching DataFrame, a list of dictionaries containing the mean of all the reviews from the nth onwards.
def mean_more_n_reviews(x, n, review_books_df): asin_1 = x['asin_1'] asin_2 = x['asin_2'] dic_1 = {} dic_2 = {} if (len(review_books_df.query('asin == @asin_1')) > n and len(review_books_df.query('asin == @asin_2')) > 20): overall_reviews_1 = review_books_df.query('asin == @asin_1'...
[ "def get_mean_of_all_genres(df, merged):\n all_genres = get_all_genres_from_df(df)\n mean_genres = {}\n for genres in all_genres:\n mean_genres[genres] = df['rating'][df[genres] == 1].mean()\n\n\n change_nan(mean_genres) # change Nan value\n\n\n for genres in all_genres:\n merged.loc[me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the letter H or L from a rating value; in this particular case, return H when val is between 4 and 5, and L otherwise.
def return_category_from_value_HL(val): val = int(val.values[0]) if val >= 4: return 'H' else: return 'L'
[ "def get_rating(mpg):\n if mpg < 14:\n return 1\n elif mpg < 15:\n return 2\n elif mpg < 17:\n return 3\n elif mpg < 20:\n return 4\n elif mpg < 24:\n return 5\n elif mpg < 27:\n return 6\n elif mpg < 31:\n return 7\n elif mpg < 37:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of dictionaries containing the mean and the 95% confidence interval of each column of the specified df, except the asin column.
def compute_stats_on_reviews_df(df): stats = [] for col in df.columns: if col != 'asin': mean = np.mean(df[col]) if np.std(df[col]) != 0: b = st.t.interval(0.95, len(df[col])-1, loc=mean, scale=st.sem(df[col])) else: b = (np.nan, np.nan...
[ "def get_confidence_intervals(df, column, ci_level=0.99):\n\n # group all the data at each date\n d = {}\n for name, group in df.groupby(['date']):\n d[name] = group\n\n # for each timepoint, calculate the CI\n for df in d.values():\n df['ci'] = calculate_ci(df[column], ci_level=ci_leve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the error value from an interval (value needed for the error plot).
def error_from_interval(interval): return (interval[1] - interval[0]) / 2
[ "def get_val_error(self):\n return self.val_error", "def error(line, data):\n\t# Metric: Sum of squared Y-axis difference\n\terr = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n\treturn err", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def yerr(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the error plot with each long-term ratings mean.
def plot_lastreviews_means_and_errors(H_in_HL_mean, H_in_HL_error, L_in_HL_mean, L_in_HL_error, H_in_HH_mean, H_in_HH_error, H_in_HM_mean, H_in_HM_error, M_in_HM_mean, M_in_HM_error): # plot the result in a nice plot plt.figure(figsize=(1...
[ "def plot_errors(self):\n\n plt.title(\"Prediction Error\")\n plt.plot(self.errors)\n plt.ylabel(\"MSE (Mean Squared Error)\")\n plt.xlabel(\"Iteration\")\n plt.show()", "def plot_misclassification_error(self):", "def plot_error(self, maxstep=20):\n plt.ion()\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to authenticate the Spotify API with the client credentials flow manager.
def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET): auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, client_secret=SPOTIPY_CLIENT_SECRET) return spotipy.Spotify(auth_manager=auth_manager)
[ "def authorize(self):\n\t\ttry:\n\t\t\tauth_url = 'https://accounts.spotify.com/api/token'\n\t\t\theaders={}\n\t\t\tdata={}\n\n\t\t\tdata_string = f\"{self.client_id}:{self.client_secret}\"\n\n\t\t\tdata_bytes = data_string.encode(\"ascii\")\n\t\t\tbase_bytes = base64.b64encode(data_bytes)\n\t\t\tbase_message = bas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize the lyrics_extractor class and to authenticate the Google Custom Search engine.
def authenticate_extract_lyrics(GCS_API_KEY, GCS_ENGINE_ID): # Initialize lyrics_extractor class return SongLyrics(GCS_API_KEY, GCS_ENGINE_ID)
[ "def __init__(self, name='google'):\n self.engine_info = filter(lambda x: 'NAME' in x.keys() and x['NAME'] is name, SMARTSEARCH_AVAILABLE_ENGINES)[0]\n self.connection = build('customsearch', 'v1', developerKey=self.engine_info['GOOGLE_SITE_SEARCH_API_KEY'])", "def __init__(self):\n\n self.sp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to get the album image URLs from tracks with the Spotify API, given a list of track IDs.
def get_img_urls(track_ids, sp): # Get a list with track information using a list of track id's tracks = sp.tracks(track_ids) # Initialize list to append image urls to img_urls = [] for i in range(len(tracks['tracks'])): images = tracks['tracks'][i]['album']['images'] seq = [x['height...
[ "def _list_of_uris_helper(playlist, tracks: List[Track], spotify_method):\n playlist_id = playlist['id']\n curr = 0\n offset = 100\n uris = [track.uri for track in tracks]\n\n while curr < len(tracks):\n spotify_method(playlist_id, uris[curr: curr + offset])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to make a radar chart of the audio features of a song.
def radar_chart(song, dataset): # Reset the index of the song dataframe song = song.reset_index(drop = True) # Normalize the audio features of the song using the audio features of the database. song['tempo_norm'] = (song['tempo'] - dataset['tempo'].min())/(dataset['tempo'].max()- dataset['tempo'].min())...
[ "def visualize_audio_features(audio_filepath):\n\taudio, sr = librosa.load(audio_filepath)\n\n\t# set figure\n\tplt.figure(figsize=(12, 8))\n\n\t# Visualize SFTF Power Spectrogram\n\tD = librosa.amplitude_to_db(np.abs(librosa.stft(audio)), ref=np.max)\n\tplt.subplot(4, 2, 1)\n\tlibrosa.display.specshow(D, y_axis=\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create dataframes for the song data and the ID lookup table from CSV files.
def load_data_csv(): # Load lookup table path = 'data/id_lookup.csv' lookup_table = pd.read_csv(path, index_col=0) # Load song data path2 = 'data/data_lyrics_features.csv' data = pd.read_csv(path2, index_col=0) return data, lookup_table
[ "def get_dataframes(csvfile, spec=SPEC):\n tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()\n for t in extract_tables(csv_segment, pdef)]\n emitter = Emitter(tables)\n return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}", "def load_dataset():\n dflist = []...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create dataframes for the song data and the ID lookup table from SQL tables.
def load_data_sql(): conn = mysql.connect(**st.secrets["mysql"]) data = pd.read_sql('SELECT * FROM song_data', conn) lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn) return data, lookup_table
[ "def get_song_table(df_song_data):\n df_song = df_song_data.dropDuplicates((['song_id'])).select(\"song_id\", \"title\", \"artist_id\", \"year\", \"duration\")\n \n return df_song.withColumn(\"year\", col(\"year\").cast(IntegerType()))\\\n .withColumn(\"duration\", col(\"duration\").cast(D...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to clean the lyrics. It lowercases the lyrics, tokenizes them and removes all stopwords.
def clean_lyrics(data): #Initialize list to store clean data, tokenizer and the set of stopwords cleaned_data = [] tokenizer = RegexpTokenizer(r'\w+') stopword_set = set(stopwords.words('english')) # Clean data for all the lyrics in the list for doc in data: # Get lowercase of lyrics string...
[ "def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)", "def clean(text):\n\n lower_proper = src.utils.nlp.lower_with_prope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to download the NLTK stopwords, necessary for downloading them in a deployed Streamlit app.
def download_nltk(): nltk.download('stopwords') return
[ "def __get_stopwords():\n\n try:\n stopwords = nltk.corpus.stopwords.words('english')\n except LookupError:\n nltk.download('stopwords')\n stopwords = nltk.corpus.stopwords.words('english')\n\n return stopwords", "def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to create a playlist on the Spotify account of the authenticated user.
def create_playlist(user_id, sp, recommendations, name, description): # Get current user ID current_user = sp.current_user() current_user_id = current_user['id'] # Get list of track ID's track_id_list = list(recommendations['id'].values) # Create Empty playlist sp.user_playlist_create(u...
[ "def create_playlist(auth_header, spotify_user_id, playlist_name, activity_id):\n\n payload = { \n 'name' : playlist_name\n }\n\n USER_PLAYLIST_ENDPOINT = \"{}/{}/{}/{}\".format(SPOTIFY_API_URL, 'users', spotify_user_id, 'playlists')\n playlist_data = requests.post(USER_PLAYLIST_ENDPOINT, dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OrganizationPolicyAssignmentResponse The model defined in huaweicloud sdk
def __init__(self, owner_id=None, organization_id=None, organization_policy_assignment_urn=None, organization_policy_assignment_id=None, organization_policy_assignment_name=None, description=None, period=None, policy_filter=None, parameters=None, policy_definition_id=None, created_at=None, updated_at=None): ...
[ "def __assign_policy_def(self):\n\n self.logger.info(\n f\"Creating policy assignment of definition {self.policy_id} to assignment {self.assignment_id}\"\n )\n policy_assignment_res = self.interactor.put_policy_assignment(\n self.policy_id, self.assignment_id\n )\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the owner_id of this OrganizationPolicyAssignmentResponse. The creator of the organization compliance rule.
def owner_id(self): return self._owner_id
[ "def owner(self):\n return Organization.objects.get(id=self.owner_id)", "def owner_id(self) -> int:\n return pulumi.get(self, \"owner_id\")", "def organization_policy_assignment_id(self):\n return self._organization_policy_assignment_id", "def organization_policy_assignment_urn(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the organization_policy_assignment_urn of this OrganizationPolicyAssignmentResponse. The unique resource identifier of the organization compliance rule.
def organization_policy_assignment_urn(self): return self._organization_policy_assignment_urn
[ "def organization_policy_assignment_id(self):\n return self._organization_policy_assignment_id", "def organization_policy_assignment_urn(self, organization_policy_assignment_urn):\n self._organization_policy_assignment_urn = organization_policy_assignment_urn", "def organization_policy_assignment_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the organization_policy_assignment_urn of this OrganizationPolicyAssignmentResponse. The unique resource identifier of the organization compliance rule.
def organization_policy_assignment_urn(self, organization_policy_assignment_urn): self._organization_policy_assignment_urn = organization_policy_assignment_urn
[ "def organization_policy_assignment_urn(self):\n return self._organization_policy_assignment_urn", "def organization_policy_assignment_id(self, organization_policy_assignment_id):\n self._organization_policy_assignment_id = organization_policy_assignment_id", "def organization_policy_assignment_na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the organization_policy_assignment_id of this OrganizationPolicyAssignmentResponse. The organization compliance rule ID.
def organization_policy_assignment_id(self): return self._organization_policy_assignment_id
[ "def organization_policy_assignment_id(self, organization_policy_assignment_id):\n self._organization_policy_assignment_id = organization_policy_assignment_id", "def policy_assignment_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy_assignment_id\")", "def policy_assignm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the organization_policy_assignment_id of this OrganizationPolicyAssignmentResponse. The organization compliance rule ID.
def organization_policy_assignment_id(self, organization_policy_assignment_id): self._organization_policy_assignment_id = organization_policy_assignment_id
[ "def organization_policy_assignment_id(self):\n return self._organization_policy_assignment_id", "def organization_policy_assignment_name(self, organization_policy_assignment_name):\n self._organization_policy_assignment_name = organization_policy_assignment_name", "def organization_policy_assignm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the organization_policy_assignment_name of this OrganizationPolicyAssignmentResponse. The name of the organization compliance rule.
def organization_policy_assignment_name(self): return self._organization_policy_assignment_name
[ "def policy_assignment_name(self):\n return self._policy_assignment_name", "def policy_assignment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy_assignment_name\")", "def organization_policy_assignment_name(self, organization_policy_assignment_name):\n self._o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the organization_policy_assignment_name of this OrganizationPolicyAssignmentResponse. The name of the organization compliance rule.
def organization_policy_assignment_name(self, organization_policy_assignment_name): self._organization_policy_assignment_name = organization_policy_assignment_name
[ "def policy_assignment_name(self, policy_assignment_name):\n self._policy_assignment_name = policy_assignment_name", "def organization_policy_assignment_name(self):\n return self._organization_policy_assignment_name", "def organization_policy_assignment_id(self, organization_policy_assignment_id):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the period of this OrganizationPolicyAssignmentResponse. The trigger period.
def period(self, period): self._period = period
[ "def set_alarm_period(self, period):\n self._alarm_period = period \n self._last_timestamp=self._current_timestamp", "def period(self, period):\n\n self._period = period", "def setPeriod(self, period):\n self.period = period", "def limit_period(self, limit_period):\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the policy_filter of this OrganizationPolicyAssignmentResponse.
def policy_filter(self): return self._policy_filter
[ "def policy(self) -> HwPolicy:\n return self._policy", "def policy(self) -> typing.Optional[\"BucketPolicy\"]:\n return jsii.get(self, \"policy\")", "def permission_policy(self):\n return self._permission_policy", "def policies_permissions(self):\n return self._policies_permissions...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the policy_filter of this OrganizationPolicyAssignmentResponse.
def policy_filter(self, policy_filter): self._policy_filter = policy_filter
[ "def setPolicy(self, policy):\n pass", "def set_policy(self, policy):\n self.policy = policy", "def set_assignment_policy(self, policy):\n self._config['assignment-policy'] = assert_type(policy, AssignmentPolicy)\n return self", "def post_set_iam_policy(self, response: policy_pb2.P...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the policy_definition_id of this OrganizationPolicyAssignmentResponse. The policy ID.
def policy_definition_id(self): return self._policy_definition_id
[ "def policy_definition_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"policy_definition_id\")", "def policy_definition_id(self) -> str:\n return pulumi.get(self, \"policy_definition_id\")", "def policy_definition_id(self) -> Optional[pulumi.Input[str]]:\n return pulum...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the policy_definition_id of this OrganizationPolicyAssignmentResponse. The policy ID.
def policy_definition_id(self, policy_definition_id): self._policy_definition_id = policy_definition_id
[ "def policy_definition_id(self):\n return self._policy_definition_id", "def organization_policy_assignment_id(self, organization_policy_assignment_id):\n self._organization_policy_assignment_id = organization_policy_assignment_id", "def policy_definition_id(self) -> str:\n return pulumi.get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a quaternion frame into an Euler frame
def get_euler_frame(quaternionion_frame): euler_frame = list(quaternionion_frame[:3]) for quaternion in gen_4_tuples(quaternionion_frame[3:]): euler_frame += quaternion_to_euler(quaternion) return euler_frame
[ "def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)", "def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n orientation.y,\n orientation.z,\n orientation.w\n )\n euler = tf.transformations.eul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare an InteractiveSession, Open a ReproducibleSession, (optionally) Add a metadata command, Close the ReproducibleSession
def new_case_study(with_metadata_command=0): isess = InteractiveSession(DBSession) isess.identify({"user": "test_user"}, testing=True) # Pass just user name. isess.open_reproducible_session(case_study_version_uuid=None, recover_previous_state=None, ...
[ "def test_interactive(self):\n self.executor.command(['python']).interactive()", "def test_create_interactive_session(app, default_user, sample_serial_workflow_in_db):\n wrm = WorkflowRunManager(sample_serial_workflow_in_db)\n expected_data = {\"path\": wrm._generate_interactive_workflow_path()}\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Constructor of the clustering algorithm CLARANS. The higher the value of maxneighbor, the closer CLARANS is to KMedoids, and the longer each search for a local minimum takes.
def __init__(self, data, number_clusters, numlocal, maxneighbor): self.__pointer_data = data self.__numlocal = numlocal self.__maxneighbor = maxneighbor self.__number_clusters = number_clusters self.__clusters = [] self.__current = [] self.__belong = []...
[ "def initialize_means_knn(self, k=1):\n\n print(\"initializing LAND means using spectral clustering on kNN-graph, edges reweighted by geodesic distances.\")\n data = self.data\n dim = data.shape[1]\n\n # construct solver graph based on the data\n solver_graph = geodesics.SolverGra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Returns the clustering result representation type that indicates how clusters are encoded. (type_encoding) Clustering result representation. See get_clusters().
def get_cluster_encoding(self): return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
[ "def cluster_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_type\")", "def get_cluster_encoding(self):\n\n return type_encoding.CLUSTER_INDEX_LIST_SEPARATION", "def cluster_type(self) -> str:\n return pulumi.get(self, \"cluster_type\")", "def cluster_types(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Forms clusters in line with the specified medoids by calculating the distance from each point to the medoids.
def __update_clusters(self, medoids): self.__belong = [0] * len(self.__pointer_data) self.__clusters = [[] for i in range(len(medoids))] for index_point in range(len(self.__pointer_data)): index_optim = -1 dist_optim = 0.0 for index in range(len(medo...
[ "def __update_clusters(self, medoids):\n\n self.__belong = [0] * len(self.__pointer_data)\n self.__clusters = [[] for _ in range(len(medoids))]\n for index_point in range(len(self.__pointer_data)):\n index_optim = -1\n dist_optim = 0.0\n\n for index in range(len...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Finds another nearest medoid for the specified point that is different from the specified medoid.
def __find_another_nearest_medoid(self, point_index, current_medoid_index): other_medoid_index = -1 other_distance_nearest = float('inf') for index_medoid in self.__current: if (index_medoid != current_medoid_index): other_distance_candidate = euclidean_distance_...
[ "def __find_another_nearest_medoid(self, point_index, current_medoid_index):\n other_medoid_index = -1\n other_distance_nearest = float(\"inf\")\n for index_medoid in self.__current:\n if index_medoid != current_medoid_index:\n other_distance_candidate = euclidean_dist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Concatenate n frames before and after the current frame
def concatenate_x_frames(x, y, num_of_frames, is_y=True): if is_y: items_x = list() items_y = list() for item in range(len(x)): x_concat = [] for i in range(num_of_frames, len(x[item]) - num_of_frames): tmp_x = None is_first = True ...
[ "def _skip_frames(self, n=1):\n w, h = self._size\n for _ in range(n):\n self._proc.stdout.read(self._depth*w*h)\n # self._proc.stdout.flush()\n self._pos += n", "def distribute_frames():\r\n # round-robin distribute frames into 4 ~equal parts\r\n frames = [x % 4 f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps and indents a string ``s``.
def indent_wrap(s, indent=0, wrap=80): split = wrap - indent chunks = [indent * " " + s[i:i + split] for i in range(0, len(s), split)] return "\n".join(chunks)
[ "def indentString(s, indent=\" \"):\n return indent + str(s).replace(\"\\n\", \"\\n\" + indent)", "def wrap_string(input_str):\r\n return textwrap.wrap(input_str, 80)", "def indent(amount: int, s: str) -> str:\n prefix = amount * \" \"\n return \"\\n\".join(prefix + line for line in s.splitlines())", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively traverse the iterable object ``d`` and convert all occurring ndarrays to lists to make it JSON serializable.
def serialize_ndarrays(d): def dict_handler(d): return d.items() handlers = {list: enumerate, tuple: enumerate, set: enumerate, frozenset: enumerate, dict: dict_handler} def serialize(o): for typ, handler in handlers.items(): if isinstance(o, typ...
[ "def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):\n is_numpy = is_numpy and use_numpy\n if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):\n return transform_array(np.asarray(datum))\n datum_copy = []\n for item in datum:\n if isinstance(item, (list, tuple)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Populate a dictionary with data from a given dict ``d``, and check that ``d`` has the required and optional keys. Set optionals to their defaults if not present. If input ``d`` is None and ``required_keys`` is empty, just return ``opt_keys``.
def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True): if required_keys is None: required_keys = [] if opt_keys is None: opt_keys = {} if d is None: if not required_keys: if opt_keys is None: raise TypeError("`d` and `opt_keys` are both ...
[ "def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of dicts/objects, return a dict mapping item[key_name] -> item.
def list_to_map(item_list, key_name): return {x.pop(key_name): x for x in item_list}
[ "def ListOfDictsToDict(list_of_dicts, key):\n result = {}\n\n for item in list_of_dicts:\n result[item[key]] = item\n\n return result", "def list_to_dict(list_of_dicts):\n output = defaultdict(list)\n for dict_ in list_of_dicts:\n for key, value in dict_.items():\n dict_[ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a command from a list with an optional environment and return a tuple (rc, stdout_str, stderr_str).
def run_command_list(cmd_list, env=None): rc = -1 sout = serr = None cmd_list = run_sanitize(cmd_list) try: if env: pipes = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) else: pipes = subprocess.Popen(cmd_list, stdout=subproc...
[ "def cmd_run(command):\n\n # Escape special chars\n command = shlex.quote(command)\n command = shlex.split(command)\n # maybe try catch here...\n process = subprocess.Popen(\n command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True\n )\n stdout, stderr = process.communicate()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string for optional use with acquire() calls. Constructs a consistent id from the platform node, process_id and thread_id.
def get_threadbased_id(guarantee_uniq=False): return '{}:{}:{}:{}'.format(platform.node(), os.getpid(), str(threading.get_ident()),uuid.uuid4().hex if guarantee_uniq else '')
[ "def get_ident():\r\n return id(greenthread.getcurrent())", "def get_pid_tid():\n # noinspection PyBroadException\n try:\n return \"(pid=%s) (tid=%s)\" % (\n six.text_type(os.getpid()),\n six.text_type(six.moves._thread.get_ident()),\n )\n except Exception:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an RFC 3339 formatted string (UTC only) to a datetime object with tzinfo explicitly set to UTC. Raises an exception if the parsing fails.
def rfc3339str_to_datetime(rfc3339_str): ret = None for fmt in rfc3339_date_input_fmts: try: ret = datetime.datetime.strptime(rfc3339_str, fmt) # Force this since the formats we support are all utc formats, to support non-utc if ret.tzinfo is None: re...
[ "def parse_rfc3339(s: str) -> datetime.datetime:\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%S.%fZ\")", "def datetime_parser(s):\n try:\n ts = arrow.get(s)\n # Convert UTC to local, result of get is UTC unless it specifies\n # timezone, bonfire assumes all time to be machin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an epoch int value to an RFC 3339 datetime string.
def epoch_to_rfc3339(epoch_int): return datetime_to_rfc3339(datetime.datetime.utcfromtimestamp(epoch_int))
[ "def epoch_to_str(epoch: int) -> str:\n return datetime_to_str(datetime.fromtimestamp(epoch, tz=timezone.utc))", "def epoch_to_format(epoch, format='%Y-%m-%dT%H:%M:%SZ'):\n\n return datetime.fromtimestamp(int(epoch[:10]), tz=timezone.utc).strftime(format)", "def epoch_to_date(epoch):\n date_string ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a CPE 2.3 formatted string and returns a CPE object. This is the only supported method to create an instance of this class. This is not entirely true to the spec: it does not unbind all the elements, as the wfn representation is not used. All of the unbinding logic is concentrated in the conversion from wfn to uri format i...
def from_cpe23_fs(cpe23_fs): cpe_parts = cpe23_fs.split(':') if cpe_parts and len(cpe_parts) == 13: return CPE( part=cpe_parts[2], vendor=cpe_parts[3], product=cpe_parts[4], version=cpe_parts[5], update=cpe_par...
[ "def parse_tws_composition(cls, fn: str) -> Composition:\n\n out = {}\n\n with open(fn, \"r\") as f:\n for line in f.readlines():\n items = line.strip().split(\",\")\n\n symbol = items[0]\n _, pex = items[3].split(\"/\")\n\n nc = S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method for escaping the affected version data in NVD JSON data, which is usually unescaped. Converts the supplied version and ensures that the resulting version is CPE 2.3 formatted-string compliant; this is necessary for as_cpe22_uri() to do its thing.
def update_version(self, version): self.version = CPE.escape_for_cpe23_fs(version)
[ "def coerce_data_version(version):\n\n # for name/date-based versions, replace dashes with underscores\n version_new = version.replace(\"-\", \"_\")\n\n # for v2.3.1 style version,s replace dots with underscores\n version_new = version_new.replace(\".\", \"_\")\n\n valid_chars = set(string.digits + s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a very limited implementation of CPE matching. other_cpe is a wildcard-ridden base CPE used by range descriptors. other_cpe is checked against this CPE for an exact match of part and vendor. For all the remaining components, a match is positive if the other CPE is an exact match or contains the wildcard character.
def is_match(self, other_cpe): if not isinstance(other_cpe, CPE): return False if self.part == other_cpe.part and self.vendor == other_cpe.vendor: if other_cpe.product not in ['*', self.product]: return False if other_cpe.version not in ['*', self.ve...
[ "def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):\n vendor_cmp = compare_fields(lhs.vendor, rhs.vendor)\n if vendor_cmp != 0:\n return vendor_cmp\n\n name_cmp = compare_fields(lhs.name, rhs.name)\n if name_cmp != 0:\n return name_cmp\n\n version_cmp = compare_fields(lhs.version, rhs.vers...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Event handler for use with ijson parsers to output floats instead of Decimals for better JSON serializability downstream.
def ijson_decimal_to_float(event): if event[1] == 'number' and isinstance(event[2], decimal.Decimal): return event[0], event[1], float(event[2]) else: return event
[ "def float_format(self):\n ...", "def _serialize_decimal(val):\n return float(val)", "def convert_float(self,data):\n int_tags = self.int_tags\n\n data_handle = {}\n\n log.debug(data)\n\n for key,value in data.items():\n\n if (key not in int_tags) and (type(value...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sync the provided roles and permissions.
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None: existing_roles = self._get_all_roles_with_permissions() non_dag_perms = self._get_all_non_dag_permissions() for config in roles: role_name = config["role"] perms = config["perms"] role = exis...
[ "def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the roles associated with the user.
def get_user_roles(user=None): if user is None: user = g.user return user.roles
[ "def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)", "def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the DAGs readable by authenticated user.
def get_readable_dags(self, user) -> Iterable[DagModel]: warnings.warn( "`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.", RemovedInAirflow3Warning, stacklevel=2, ) with warnings.catch_warnings(): warnings.simple...
[ "def get_readable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])", "def _get_read_accessible_workspaces_by_user(user):\n if not settings.CAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT and user.is_anonymous:\n accessible_workspaces = []\n else:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the DAGs editable by authenticated user.
def get_editable_dags(self, user) -> Iterable[DagModel]: warnings.warn( "`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.", RemovedInAirflow3Warning, stacklevel=2, ) with warnings.catch_warnings(): warnings.simple...
[ "def get_editable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])", "def get_editable_explorations(user_id):\n return [e for e in get_viewable_explorations(user_id)\n if e.is_editable_by(user_id)]", "def get_readable_dag_ids(self, u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the DAG IDs readable by authenticated user.
def get_readable_dag_ids(self, user) -> set[str]: return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
[ "def get_editable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])", "async def get_readable_project_ids(\n conn: SAConnection, user_id: UserID\n) -> list[ProjectID]:\n projects_access_rights = await list_projects_access_rights(conn, user_id)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the DAG IDs editable by authenticated user.
def get_editable_dag_ids(self, user) -> set[str]: return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
[ "def get_readable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])", "def get_editable_dags(self, user) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a user has DAG read access.
def can_read_dag(self, dag_id: str, user=None) -> bool: root_dag_id = self._get_root_dag_id(dag_id) dag_resource_name = permissions.resource_name_for_dag(root_dag_id) return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
[ "def has_read_access():\n return api.is_admin() or api.is_group_member('groups-readonly-access')", "def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a user has DAG delete access.
def can_delete_dag(self, dag_id: str, user=None) -> bool: root_dag_id = self._get_root_dag_id(dag_id) dag_resource_name = permissions.resource_name_for_dag(root_dag_id) return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
[ "def can_delete(self, user):\n raise Return(False)", "def allows_delete(self, user):\n return user.has_perm(\"questions.delete_question\")", "def allows_delete(self, user):\n return user.has_perm(\"questions.delete_answer\")", "def can_delete(self, user_obj):\n if self.id is None:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the permission name for a DAG id.
def prefixed_dag_id(self, dag_id: str) -> str: warnings.warn( "`prefixed_dag_id` has been deprecated. " "Please use `airflow.security.permissions.resource_name_for_dag` instead.", RemovedInAirflow3Warning, stacklevel=2, ) root_dag_id = self._get_ro...
[ "def get_name(id):\r\n\r\n graph = GraphAPI(access_token=TOKEN, version='2.5')\r\n\r\n return graph.get_object(id=str(id).split('-')[0])['name']", "def PermissionSetName(self) -> str:", "def get_permission_name(cls, permission_type):\n split = permission_type.split(\"_\")\n if len(split) < 2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if a resource belongs to a DAG or all DAGs.
def is_dag_resource(self, resource_name: str) -> bool: if resource_name == permissions.RESOURCE_DAG: return True return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
[ "def isDAG(self) -> bool:\n return nx.is_directed_acyclic_graph(self.graph)", "def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify whether a given user could perform a certain action on the given resource. Example actions might include can_read, can_write, can_delete, etc.
def has_access(self, action_name: str, resource_name: str, user=None) -> bool: if not user: user = g.user if (action_name, resource_name) in user.perms: return True if self.is_dag_resource(resource_name): if (action_name, permissions.RESOURCE_DAG) in user.per...
[ "def user_has_action_permission(self, user_db, action_ref):\n pass", "def allow(self, user, action):\n if action == 'view':\n return True\n elif action == 'edit':\n if user.is_allowed:\n return True\n else:\n return False", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FAB leaves faulty permissions that need to be cleaned up.
def clean_perms(self) -> None: self.log.debug("Cleaning faulty perms") sesh = self.appbuilder.get_session perms = sesh.query(Permission).filter( or_( Permission.action == None, # noqa Permission.resource == None, # noqa ) ) ...
[ "def clean_perms(self) -> None:\n\n logger.info(\"Cleaning faulty perms\")\n sesh = self.get_session\n pvms = sesh.query(PermissionView).filter(\n or_(\n PermissionView.permission # pylint: disable=singleton-comparison\n == None,\n Permis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the new (action, resource) to assoc_permission_role if it doesn't exist. It will also add the related entries to the ab_permission and ab_resource meta tables.
def _merge_perm(self, action_name: str, resource_name: str) -> None: action = self.get_action(action_name) resource = self.get_resource(resource_name) perm = None if action and resource: perm = self.appbuilder.get_session.scalar( select(self.permission_model)....
[ "def add_permission_role(self, role, perm_view):\n if perm_view not in role.permissions:\n try:\n role.permissions.append(perm_view)\n role.save()\n log.info(\"Added Permission %s to role %s\" % (str(perm_view), role.name))\n except Exception...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add Website.can_read access to all custom roles.
def add_homepage_access_to_custom_roles(self) -> None: website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE) custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES] for role in custom_roles: self.add_p...
[ "def addRoleAccess(self, role, read, write, catalog='*', repository='*'):\n return self._client.addRoleAccess(role, read, write, catalog, repository)", "def roles():\n pass", "def test_ipam_roles_read(self):\n pass", "def create_custom_permissions(self) -> None:\n self.add_permission_v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all permissions as a set of tuples with the action and resource names.
def get_all_permissions(self) -> set[tuple[str, str]]: return set( self.appbuilder.get_session.execute( select(self.action_model.name, self.resource_model.name) .join(self.permission_model.action) .join(self.permission_model.resource) ) ...
[ "def permission_resources(self):\n return self._permission_resources", "def get_permissions(self):\n if self.action in ['retrieve', 'list', 'add_view']:\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get permissions except those that are for specific DAGs. Returns a dict keyed by (action_name, resource_name) whose values are the permissions, excluding any that apply to specific DAGs.
def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]: return { (action_name, resource_name): viewmodel for action_name, resource_name, viewmodel in ( self.appbuilder.get_session.execute( select(self.action_model.name, self.resour...
[ "def permissions_management_actions_without_constraints(self):\n results = []\n if self.always_exclude_actions:\n for (\n action\n ) in self.policy_document.permissions_management_without_constraints:\n if is_name_excluded(action.lower(), self.always...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dict keyed by role name whose values are the roles with their permissions eagerly loaded.
def _get_all_roles_with_permissions(self) -> dict[str, Role]: return { r.name: r for r in self.appbuilder.get_session.scalars( select(self.role_model).options(joinedload(self.role_model.permissions)) ).unique() }
[ "def get_roles(role):", "def existing_role_settings(self):\r\n context = self.context\r\n \r\n portal_membership = getToolByName(context, 'portal_membership')\r\n portal_groups = getToolByName(context, 'portal_groups')\r\n acl_users = getToolByName(context, 'acl_users')\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add permissions to all DAGs. Creates 'can_read', 'can_edit', and 'can_delete' permissions for all DAGs, along with any `access_control` permissions provided in them. This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag` if you only need to sync a single DAG.
def create_dag_specific_permissions(self) -> None: perms = self.get_all_permissions() dagbag = DagBag(read_dags_from_db=True) dagbag.collect_dags_from_db() dags = dagbag.dags.values() for dag in dags: root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag...
[ "def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add missing permissions to the table for the Admin role. Admin should get every permission except the per-DAG ones, because Admin already holds the global DAGs permission; add the missing entries to the table for admin.
def update_admin_permission(self) -> None: session = self.appbuilder.get_session dag_resources = session.scalars( select(Resource).where(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%")) ) resource_ids = [resource.id for resource in dag_resources] perms = se...
[ "def create_missing_perms(self) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset.connectors.sqla.models import SqlaTable\n from superset.models import core as models\n\n logger.info(\"Fetching a set of all perms to lookup which ones are missing\")\n all_pvs =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize default and custom roles with related permissions. 1. Init the default roles (Admin, Viewer, User, Op, public) with related permissions. 2. Init the custom role (daguser) with related permissions.
def sync_roles(self) -> None: # Create global all-dag permissions self.create_perm_vm_for_all_dag() # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions self.bulk_sync_roles(self.ROLE_CONFIGS) self.add_homepage_access_to_custom_roles() # i...
[ "def init_default_roles() -> None:\n roles_info = _get_roles_info()\n roles_resources = _get_roles_resources()\n\n admin_role = Role.query.first()\n if admin_role:\n raise RuntimeError(u'admin role is exist!')\n\n for role_name, role_info in roles_info.items():\n role_resources = roles_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sync permissions for the given DAG id. The DAG id is guaranteed to exist in our DAG bag, since only the /refresh button or the DagBag will call this function.
def sync_perm_for_dag( self, dag_id: str, access_control: dict[str, Collection[str]] | None = None, ) -> None: dag_resource_name = permissions.resource_name_for_dag(dag_id) for dag_action_name in self.DAG_ACTIONS: self.create_permission(dag_action_name, dag_resour...
[ "def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the access policy on the given DAG's ViewModel.
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None: dag_resource_name = permissions.resource_name_for_dag(dag_id) def _get_or_create_dag_permission(action_name: str) -> Permission | None: perm = self.get_permission(action_name, dag_resource...
[ "def setPolicy(self, policy):\n pass", "def set_policy(self, policy):\n self.policy = policy", "def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n dag_resource_name = permissions.resource_name_for_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }