_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q42000
ModelAdapter.adapt_persistent_to_rest
train
def adapt_persistent_to_rest(self, persistent_object, attribute_filter=None):
    """ adapts a persistent model to a rest model by inspecting

    persistent_object -- the persistent (e.g. SQLAlchemy) instance to read from
    attribute_filter -- optional parser.AttributeFilter restricting which
                        attributes are copied; converted to immutable up front

    Returns a populated instance of self.rest_model_class.
    Raises TypeError with the offending attribute name on assignment errors,
    and InconsistentPersistentDataError when nested validation fails.
    """
    # convert filter to immutable if it isn't already
    if isinstance(attribute_filter, parser.AttributeFilter):
        attribute_filter = attribute_filter.as_immutable()

    rest_model_instance = self.rest_model_class()

    for attribute_key in rest_model_instance.get_attribute_keys():

        # attribute is not visible don't bother processing
        if isinstance(attribute_filter, (parser.AttributeFilter, parser.AttributeFilterImmutable)) and \
                not attribute_filter.is_attribute_visible(attribute_key):
            continue

        rest_attr = getattr(self.rest_model_class, attribute_key)

        # don't bother processing if the persistent model doesn't have this attribute
        if not hasattr(persistent_object, attribute_key):

            if isinstance(rest_attr, types.Model):
                #: If the attribute is a Model, then we set it to None otherwise we get a model
                #: with default values, which is invalid when constructing responses
                try:
                    setattr(rest_model_instance, attribute_key, None)
                # catch any exception thrown from setattr to give a usable error message
                except TypeError as exp:
                    raise TypeError('Attribute %s, %s' % (attribute_key, str(exp)))

            continue

        # ignore class methods
        elif inspect.ismethod(getattr(persistent_object, attribute_key)):
            import logging
            logging.error("ignoring method: "+attribute_key)
            continue

        # handles prestans array population from SQLAlchemy relationships
        elif isinstance(rest_attr, types.Array):

            persistent_attr_value = getattr(persistent_object, attribute_key)
            rest_model_array_handle = getattr(rest_model_instance, attribute_key)

            # iterator uses the .append method exposed by prestans arrays to validate
            # and populate the collection in the instance.
            for collection_element in persistent_attr_value:
                if rest_attr.is_scalar:
                    rest_model_array_handle.append(collection_element)
                else:
                    # non-scalar elements are adapted recursively via the registry
                    element_adapter = registry.get_adapter_for_rest_model(rest_attr.element_template)
                    # check if there is a sub model filter
                    sub_attribute_filter = None
                    if attribute_filter and attribute_key in attribute_filter:
                        sub_attribute_filter = getattr(attribute_filter, attribute_key)

                    adapted_rest_model = element_adapter.adapt_persistent_to_rest(
                        collection_element, sub_attribute_filter
                    )
                    rest_model_array_handle.append(adapted_rest_model)

        elif isinstance(rest_attr, types.Model):

            try:
                persistent_attr_value = getattr(persistent_object, attribute_key)

                if persistent_attr_value is None:
                    # explicit None rather than a default-constructed sub model
                    adapted_rest_model = None
                else:
                    model_adapter = registry.get_adapter_for_rest_model(rest_attr)

                    # check if there is a sub model filter
                    sub_attribute_filter = None
                    if isinstance(attribute_filter, (parser.AttributeFilter, parser.AttributeFilterImmutable)) and \
                            attribute_key in attribute_filter:
                        sub_attribute_filter = getattr(attribute_filter, attribute_key)

                    adapted_rest_model = model_adapter.adapt_persistent_to_rest(
                        persistent_attr_value, sub_attribute_filter
                    )

                setattr(rest_model_instance, attribute_key, adapted_rest_model)
            except TypeError as exp:
                raise TypeError('Attribute %s, %s' % (attribute_key, str(exp)))
            except exception.DataValidationException as exp:
                raise exception.InconsistentPersistentDataError(attribute_key, str(exp))

        else:
            # otherwise copy the value to the rest model
            try:
                persistent_attr_value = getattr(persistent_object, attribute_key)
                setattr(rest_model_instance, attribute_key, persistent_attr_value)
            except TypeError as exp:
                raise TypeError('Attribute %s, %s' % (attribute_key, str(exp)))
            except exception.ValidationError as exp:
                raise exception.InconsistentPersistentDataError(attribute_key, str(exp))

    return rest_model_instance
python
{ "resource": "" }
q42001
Solr._send_solr_command
train
def _send_solr_command(self, core_url, json_command):
    """ Sends JSON string to Solr instance """
    # Check document language and dispatch to correct core
    update_url = _get_url(core_url, "update")
    try:
        resp = self.req_session.post(
            update_url,
            data=json_command,
            headers={'Content-Type': 'application/json'})
        resp.raise_for_status()
    except requests.RequestException as e:
        # log with traceback, then surface a Solr-specific error to callers
        logger.error("Failed to send update to Solr endpoint [%s]: %s", core_url, e, exc_info=True)
        raise SolrException("Failed to send command to Solr [%s]: %s" % (core_url, e,))
    return True
python
{ "resource": "" }
q42002
Solr.add
train
def add(self, documents, boost=None):
    """ Adds documents to Solr index

    documents - Single item or list of items to add
    """
    # accept a single document as well as a list
    if not isinstance(documents, list):
        documents = [documents]
    wrapped = [{'doc': entry} for entry in documents]
    if boost:
        for entry in wrapped:
            entry['boost'] = boost
    self._add_batch.extend(wrapped)
    # flush once the buffered batch grows past the configured threshold
    if len(self._add_batch) > SOLR_ADD_BATCH:
        self._addFlushBatch()
python
{ "resource": "" }
q42003
Solr._addFlushBatch
train
def _addFlushBatch(self): """ Sends all waiting documents to Solr """ if len(self._add_batch) > 0: language_batches = {} # Create command JSONs for each of language endpoints for lang in self.endpoints: # Append documents with languages without endpoint to default endpoint document_jsons = ["\"add\":" + json.dumps(data) for data in self._add_batch if data['doc'].get("language", self.default_endpoint) == lang or (lang == self.default_endpoint and not self.endpoints.has_key(data['doc'].get("language", None)))] command_json = "{" + ",".join(document_jsons) + "}" language_batches[lang] = command_json # Solr requires for documents to be sent in { "add" : { "doc" : {...} }, "add": { "doc" : { ... }, ... } # format which isn't possible with python dictionaries for lang in language_batches: self._send_solr_command(self.endpoints[lang], language_batches[lang]) self._add_batch = []
python
{ "resource": "" }
q42004
Solr.deleteAll
train
def deleteAll(self):
    """ Deletes whole Solr index. Use with care. """
    # issue a match-everything delete against every configured core
    wipe_command = "{\"delete\": { \"query\" : \"*:*\"}}"
    for core_name in self.endpoints:
        self._send_solr_command(self.endpoints[core_name], wipe_command)
python
{ "resource": "" }
q42005
Solr.delete
train
def delete(self, id):
    """ Deletes document with ID on all Solr cores

    id - document identifier; converted to its string form.
    The delete command is built with json.dumps so identifiers containing
    quotes or backslashes no longer produce invalid JSON (the original
    interpolated the raw id into a JSON string literal).
    """
    command = json.dumps({"delete": {"id": "%s" % (id,)}})
    for core in self.endpoints:
        self._send_solr_command(self.endpoints[core], command)
python
{ "resource": "" }
q42006
Solr.commit
train
def commit(self):
    """ Flushes all pending changes and commits Solr changes """
    # push any buffered documents first, then commit on every core
    self._addFlushBatch()
    commit_command = "{ \"commit\":{} }"
    for core in self.endpoints:
        self._send_solr_command(self.endpoints[core], commit_command)
python
{ "resource": "" }
q42007
Solr._get_shards
train
def _get_shards(self):
    """ Returns comma separated list of configured Solr cores """
    # lazily computed and cached on first use
    if self._shards is None:
        shard_list = []
        for endpoint_name in self.endpoints:
            # strip the scheme (e.g. http://) — shards are host/path only
            parsed = urlparse.urlparse(self.endpoints[endpoint_name])
            shard_list.append("/".join([parsed.netloc, parsed.path]))
        self._shards = ",".join(shard_list)
    return self._shards
python
{ "resource": "" }
q42008
Solr._parse_response
train
def _parse_response(self, results):
    """ Parses result dictionary into a SolrResults object

    results - decoded JSON response from Solr containing "response",
              optionally "facet_counts" and "highlighting" sections.
    Returns a populated SolrResults instance.
    (The facet loop variable was renamed from `type`, which shadowed the
    builtin, to `facet_type`.)
    """
    dict_response = results.get("response")
    result_obj = SolrResults()
    result_obj.query_time = results.get("responseHeader").get("QTime", None)
    result_obj.results_count = dict_response.get("numFound", 0)
    result_obj.start_index = dict_response.get("start", 0)
    for doc in dict_response.get("docs", []):
        result_obj.documents.append(doc)
    # Process facets
    if "facet_counts" in results:
        facet_types = ["facet_fields", "facet_dates", "facet_ranges", "facet_queries"]
        for facet_type in facet_types:
            assert facet_type in results.get("facet_counts")
            items = results.get("facet_counts").get(facet_type)
            for field, values in items.items():
                result_obj.facets[field] = []
                # Range facets have results in "counts" subkey and "between/after" on top level. Flatten this.
                if facet_type == "facet_ranges":
                    if "counts" not in values:
                        continue
                    for facet, value in values["counts"].items():
                        result_obj.facets[field].append((facet, value))
                    if "before" in values:
                        result_obj.facets[field].append(("before", values["before"]))
                    if "after" in values:
                        result_obj.facets[field].append(("after", values["after"]))
                else:
                    for facet, value in values.items():
                        # Date facets have metadata fields between the results; skip the
                        # params, keep "before" and "after" fields for other types
                        if facet_type == "facet_dates" and \
                                (facet == "gap" or facet == "between" or facet == "start" or facet == "end"):
                            continue
                        result_obj.facets[field].append((facet, value))
    # Process highlights
    if "highlighting" in results:
        for key, value in results.get("highlighting").items():
            result_obj.highlights[key] = value
    return result_obj
python
{ "resource": "" }
q42009
Solr.query
train
def query(self, query, filters=None, columns=None, sort=None, start=0, rows=30):
    """ Queries Solr and returns results

    query - Text query to search for
    filters - dictionary of filters to apply when searching in form of { "field":"filter_value" }
    columns - columns to return, list of strings
    sort - list of fields to sort on in format of ["field asc", "field desc", ... ]
    start - start number of first result (used in pagination)
    rows - number of rows to return (used for pagination, defaults to 30)

    Returns a SolrResults object, or None when the request fails or the
    server reports a non-zero status.
    """
    if not columns:
        columns = ["*", "score"]
    fields = {"q": query,
              "json.nl" :"map",  # Return facets as JSON objects
              "fl": ",".join(columns),  # Return score along with results
              "start": str(start),
              "rows": str(rows),
              "wt": "json"}
    # Use shards parameter only if there are several cores active
    if len(self.endpoints) > 1:
        fields["shards"] = self._get_shards()
    # Prepare filters (joined as a single conjunctive fq parameter)
    if not filters is None:
        filter_list = []
        for filter_field, value in filters.items():
            filter_list.append("%s:%s" % (filter_field, value))
        fields["fq"] = " AND ".join(filter_list)
    # Append sorting parameters
    if not sort is None:
        fields["sort"] = ",".join(sort)
    # Do request to Solr server to default endpoint (other cores will be queried with shard functionality)
    assert self.default_endpoint in self.endpoints
    request_url = _get_url(self.endpoints[self.default_endpoint], "select")
    results = self._send_solr_query(request_url, fields)
    if not results:
        return None
    assert "responseHeader" in results
    # Check for response status
    if not results.get("responseHeader").get("status") == 0:
        logger.error("Server error while retrieving results: %s", results)
        return None
    assert "response" in results
    result_obj = self._parse_response(results)
    return result_obj
python
{ "resource": "" }
q42010
Solr.more_like_this
train
def more_like_this(self, query, fields, columns=None, start=0, rows=30):
    """ Retrieves "more like this" results for a passed query document

    query - query for a document on which to base similar documents
    fields - fields on which to base similarity estimation (either comma delimited string or a list)
    columns - columns to return (list of strings)
    start - start number for first result (used in pagination)
    rows - number of rows to return (used for pagination, defaults to 30)

    Returns a SolrResults object, or None on failure / server error.
    """
    # normalize the similarity fields to a comma delimited string
    if isinstance(fields, basestring):
        mlt_fields = fields
    else:
        mlt_fields = ",".join(fields)
    if columns is None:
        columns = ["*", "score"]
    # renamed local dict from `fields` to `params` — the original reassigned
    # the `fields` parameter, shadowing it and obscuring the data flow
    params = {'q': query,
              'json.nl': 'map',
              'mlt.fl': mlt_fields,
              'fl': ",".join(columns),
              'start': str(start),
              'rows': str(rows),
              'wt': "json"}
    # Query extra cores via shards only when several are configured
    if len(self.endpoints) > 1:
        params["shards"] = self._get_shards()
    assert self.default_endpoint in self.endpoints
    request_url = _get_url(self.endpoints[self.default_endpoint], "mlt")
    results = self._send_solr_query(request_url, params)
    if not results:
        return None
    assert "responseHeader" in results
    # Check for response status
    if not results.get("responseHeader").get("status") == 0:
        logger.error("Server error while retrieving results: %s", results)
        return None
    assert "response" in results
    return self._parse_response(results)
python
{ "resource": "" }
q42011
run_suite
train
def run_suite(case, config, summary):
    """ Run the full suite of numerics tests

    case -- name of the test case being run
    config -- per-case configuration dict; mutated here ("name", "plot_dir")
    summary -- dict updated in place with this case's summarized result
    """
    m = importlib.import_module(config['module'])
    m.set_up()
    config["name"] = case
    analysis_data = {}
    bundle = livvkit.numerics_model_module
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    plot_dir = os.path.join(livvkit.output_dir, "numerics", "imgs")
    config["plot_dir"] = plot_dir
    functions.mkdir_p(plot_dir)
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)
    for mscale in sorted(model_cases):
        # benchmark data may be missing for a scale; fall back to empty list
        bscale = bench_cases[mscale] if mscale in bench_cases else []
        for mproc in model_cases[mscale]:
            full_name = '-'.join([mscale, mproc])
            # processor-count subcase names encode the directory path with "-"
            bpath = (os.path.join(bench_dir, mscale, mproc.replace("-", os.path.sep))
                     if mproc in bscale else "")
            mpath = os.path.join(model_dir, mscale, mproc.replace("-", os.path.sep))
            model_data = functions.find_file(mpath, "*" + config["output_ext"])
            bench_data = functions.find_file(bpath, "*" + config["output_ext"])
            analysis_data[full_name] = bundle.get_plot_data(model_data,
                                                            bench_data,
                                                            m.setup[case],
                                                            config)
    try:
        el = m.run(config, analysis_data)
    except KeyError:
        # missing analysis data surfaces as a KeyError inside the module's run
        el = elements.error("Numerics Plots", "Missing data")
    result = elements.page(case, config['description'], element_list=el)
    summary[case] = _summarize_result(m, analysis_data, config)
    _print_summary(m, case, summary[case])
    functions.create_page_from_template("numerics.html",
                                        os.path.join(livvkit.index_dir, "numerics", case + ".html"))
    functions.write_json(result, os.path.join(livvkit.output_dir, "numerics"), case + ".json")
python
{ "resource": "" }
q42012
ParticleSystem.run_ahead
train
def run_ahead(self, time, framerate):
    """Run the particle system for the specified time frame at the
    specified framerate to move time forward as quickly as possible.

    Useful for "warming up" the particle system to reach a steady-state
    before anything is drawn or to simply "skip ahead" in time.

    time -- The amount of simulation time to skip over.

    framerate -- The framerate of the simulation in updates per unit
    time. Higher values will increase simulation accuracy, but will take
    longer to compute.
    """
    if not time:
        return
    # fixed timestep derived from the framerate
    step = 1.0 / framerate
    tick = self.update  # bind once; called many times in the loop
    for _ in range(int(time / step)):
        tick(step)
python
{ "resource": "" }
q42013
Panel.create_gp
train
def create_gp(self):
    """ Create GnuPlot file.

    Assembles the gnuplot source from a fixed header (log axes), one
    per-BAM style line, a fixed axis/grid section, and the accumulated
    plot commands, then writes it to self._gp_fn.
    """
    nb_bams = len(self.bams)
    gp_parts = [
        textwrap.dedent(
            """\
            set log x
            set log x2

            #set format x "10^{{%L}}"
            set format x2 "10^{{%L}}"
            set x2tics
            unset xtics
            """
        ),
        # one style definition per BAM file
        os.linesep.join([self._gp_style_func(i, nb_bams) for i in range(nb_bams)]),
        textwrap.dedent(
            """\
            set format y "%g %%"
            set ytics

            set pointsize 1.5

            set grid ytics lc rgb "#777777" lw 1 lt 0 front
            set grid x2tics lc rgb "#777777" lw 1 lt 0 front

            set datafile separator "\\t"
            set palette negative
            """
        ),
        os.linesep.join(self.gp_plots)
    ]
    gp_src = os.linesep.join(gp_parts)
    # .format(
    #     x_lab=self.default_x_label,
    # )
    with open(self._gp_fn, "w+") as f:
        f.write(gp_src)
python
{ "resource": "" }
q42014
Panel.create_graphics
train
def create_graphics(self):
    """Create images related to this panel."""
    # nothing to render when no SVG outputs are registered
    if not self._svg_fns:
        return
    # run gnuplot on the panel's script to produce the SVG files
    rnftools.utils.shell('"{}" "{}"'.format("gnuplot", self._gp_fn))
    if self.render_pdf_method is None:
        return
    # optionally convert each SVG to a PDF next to it
    for svg_name in self._svg_fns:
        pdf_name = re.sub(r'\.svg$', r'.pdf', svg_name)
        svg42pdf(svg_name, pdf_name, method=self.render_pdf_method)
python
{ "resource": "" }
q42015
Panel.create_tar
train
def create_tar(self):
    """Create a tar file with all the files (ROC data, gnuplot scripts
    and a Makefile), rewriting absolute paths to archive-relative ones.

    Fixes over the original: the tar handle is now closed (it was leaked,
    so the archive could be left unflushed), and TarInfo.size is computed
    from the encoded byte length instead of the character count (the
    original corrupted archives containing non-ASCII text).
    """

    def add_text_to_tar(tar, new_fn, text, func=None):
        # store `text` (optionally transformed) under `new_fn`
        if func is not None:
            text = func(text)
        payload = text.encode('utf8')
        tf = tarfile.TarInfo(name=new_fn)
        # size must be the encoded byte length, not len(text)
        tf.size = len(payload)
        tar.addfile(tarinfo=tf, fileobj=io.BytesIO(payload))

    def add_file_to_tar(tar, orig_fn, new_fn, func=None):
        # read an existing file and store it (optionally transformed)
        with open(orig_fn) as f:
            add_text_to_tar(tar, new_fn, f.read(), func=func)

    def strip_lines(text):
        # collapse tabs and runs of spaces, strip each line
        text = text.replace("\t", " ")
        while text.find("  ") != -1:
            text = text.replace("  ", " ")
        lines = [x.strip() for x in text.strip().split("\n")]
        return "\n".join(lines) + "\n"

    tar = tarfile.TarFile(self._tar_fn, "w")
    try:
        for bam in self.bams:
            roc_fn = bam.roc_fn()
            t_roc_fn = os.path.basename(roc_fn)
            gp_fn = bam.gp_fn()
            t_gp_fn = os.path.basename(gp_fn)
            svg_fn = bam.svg_fn()
            t_svg_fn = os.path.basename(svg_fn)
            add_file_to_tar(tar, roc_fn, t_roc_fn)
            # rewrite absolute roc/svg paths inside the gnuplot script
            add_file_to_tar(
                tar, gp_fn, t_gp_fn,
                lambda x, roc_fn=roc_fn, t_roc_fn=t_roc_fn, svg_fn=svg_fn, t_svg_fn=t_svg_fn:
                    strip_lines(x.replace(roc_fn, t_roc_fn).replace(svg_fn, t_svg_fn))
            )
        gp_fn = self._gp_fn
        t_gp_fn = os.path.basename(gp_fn)
        svg_dir = os.path.join(self.panel_dir, "graphics") + "/"
        roc_dir = os.path.join(self.panel_dir, "roc") + "/"
        add_file_to_tar(tar, gp_fn, t_gp_fn,
                        lambda x: strip_lines(x.replace(svg_dir, "").replace(roc_dir, "")))
        makefile = [
            ".PHONY: all",
            "all:",
            "\tgnuplot *.gp",
            "clean:",
            "\trm -f *.svg",
            "",
        ]
        add_text_to_tar(tar, "Makefile", "\n".join(makefile))
    finally:
        tar.close()
python
{ "resource": "" }
q42016
merge_dicts
train
def merge_dicts(dict1, dict2):
    """ Merge two dictionaries and return the result.

    Entries from dict2 override entries from dict1; neither input is
    modified.
    """
    merged = dict(dict1)
    merged.update(dict2)
    return merged
python
{ "resource": "" }
q42017
parse_gptl
train
def parse_gptl(file_path, var_list):
    """ Read a GPTL timing file and extract some data.

    Args:
        file_path: the path to the GPTL timing file
        var_list: a list of strings to look for in the file

    Returns:
        A dict containing key-value pairs of the livvkit and the times
        associated with them (column 5 divided by column 3 of the first
        matching line).

    Fix: the original iterated the file object once per variable, so the
    iterator was exhausted after the first variable and any variable whose
    line appeared earlier in the file was silently missed. The file is now
    scanned once for all variables.
    """
    timing_result = dict()
    if os.path.isfile(file_path):
        remaining = set(var_list)
        with open(file_path, 'r') as f:
            for line in f:
                # first matching line wins for each variable
                for var in list(remaining):
                    if var in line:
                        parts = line.split()
                        timing_result[var] = float(parts[4]) / int(parts[2])
                        remaining.remove(var)
                if not remaining:
                    break
    return timing_result
python
{ "resource": "" }
q42018
find_file
train
def find_file(search_dir, file_pattern):
    """ Search for a file in a directory, and return the first match.
    If the file is not found return an empty string

    Args:
        search_dir: The root directory to search in
        file_pattern: A unix-style wildcard pattern representing
            the file to find

    Returns:
        The path to the file if it was found, otherwise an empty string
    """
    for dirpath, _, filenames in os.walk(search_dir):
        # fnmatch.filter preserves order, so the first hit matches the
        # original per-file loop
        matches = fnmatch.filter(filenames, file_pattern)
        if matches:
            return os.path.join(dirpath, matches[0])
    return ""
python
{ "resource": "" }
q42019
create_page_from_template
train
def create_page_from_template(template_file, output_path):
    """ Copy the correct html template file to the output directory """
    # make sure the destination directory exists before copying
    mkdir_p(os.path.dirname(output_path))
    template_path = os.path.join(livvkit.resource_dir, template_file)
    shutil.copy(template_path, output_path)
python
{ "resource": "" }
q42020
read_json
train
def read_json(file_path):
    """ Read in a json file and return a dictionary representation """
    try:
        with open(file_path, 'r') as f:
            return json_tricks.load(f)
    except ValueError:
        # print a readable diagnostic before re-raising the parse error
        banner = ' '+'!'*58
        print(banner)
        print(' Woops! Looks the JSON syntax is not valid in:')
        print(' {}'.format(file_path))
        print(' Note: commonly this is a result of having a trailing comma \n in the file')
        print(banner)
        raise
python
{ "resource": "" }
q42021
write_json
train
def write_json(data, path, file_name):
    """ Write out data to a json file.

    Args:
        data: A dictionary representation of the data to write out
        path: The directory to output the file in
        file_name: The name of the file to write out
    """
    if os.path.exists(path):
        # silently refuse to write when `path` exists but is not a directory
        if not os.path.isdir(path):
            return
    else:
        mkdir_p(path)
    with open(os.path.join(path, file_name), 'w') as f:
        json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)
python
{ "resource": "" }
q42022
collect_cases
train
def collect_cases(data_dir):
    """ Find all cases and subcases of a particular run type.

    Leaf directories (those with no subdirectories) define the cases; the
    first path component is the case name and the remaining components are
    joined with "-" into the subcase name.
    """
    cases = {}
    for root, subdirs, _ in os.walk(data_dir):
        if subdirs:
            continue
        parts = os.path.relpath(root, data_dir).split(os.path.sep)
        cases.setdefault(parts[0], []).append("-".join(parts[1:]))
    return cases
python
{ "resource": "" }
q42023
setup_output
train
def setup_output(cssd=None, jsd=None, imgd=None):
    """ Set up the directory structure for the output. Copies old run
    data into a timestamped directory and sets up the new directory.

    Args:
        cssd: optional directory of css files to use instead of the bundled ones
        jsd: optional directory of js files to use instead of the bundled ones
        imgd: optional directory of image files to use instead of the bundled ones

    Fix: a user-supplied `imgd` was previously copied into the "js"
    directory; it now goes to "imgs" like the bundled fallback.
    """
    # Check if we need to back up an old run
    if os.path.isdir(livvkit.index_dir):
        print("-------------------------------------------------------------------")
        print(' Previous output data found in output directory!')
        try:
            # derive the backup suffix from the previous run's timestamp
            with open(os.path.join(livvkit.index_dir, "data.txt"), "r") as f:
                prev_time = f.readline().replace(":", "").replace("-", "").replace(" ", "_").rstrip()
        except IOError:
            prev_time = "bkd_"+datetime.now().strftime("%Y%m%d_%H%M%S")
        print(' Backing up data to:')
        print(' ' + livvkit.index_dir + "_" + prev_time)
        print("-------------------------------------------------------------------")
        shutil.move(livvkit.index_dir, livvkit.index_dir + "_" + prev_time)
    else:
        print("-------------------------------------------------------------------")
    # Copy over js, css, & imgs directories from source (or user overrides)
    if cssd:
        shutil.copytree(cssd, os.path.join(livvkit.index_dir, "css"))
    else:
        shutil.copytree(os.path.join(livvkit.resource_dir, "css"),
                        os.path.join(livvkit.index_dir, "css"))
    if jsd:
        shutil.copytree(jsd, os.path.join(livvkit.index_dir, "js"))
    else:
        shutil.copytree(os.path.join(livvkit.resource_dir, "js"),
                        os.path.join(livvkit.index_dir, "js"))
    if imgd:
        # BUG FIX: this previously copied the override images into "js"
        shutil.copytree(imgd, os.path.join(livvkit.index_dir, "imgs"))
    else:
        shutil.copytree(os.path.join(livvkit.resource_dir, "imgs"),
                        os.path.join(livvkit.index_dir, "imgs"))
    # Get the index template from the resource directory
    shutil.copy(os.path.join(livvkit.resource_dir, "index.html"),
                os.path.join(livvkit.index_dir, "index.html"))
    # Record when this data was recorded so we can make nice backups
    with open(os.path.join(livvkit.index_dir, "data.txt"), "w") as f:
        f.write(livvkit.timestamp + "\n")
        f.write(livvkit.comment)
python
{ "resource": "" }
q42024
prepare_query_params
train
def prepare_query_params(**kwargs):
    """ Prepares given parameters to be used in querystring. """
    pairs = []
    for key, value in kwargs.items():
        for sub_key, sub_value in expand(value, key):
            # drop expanded entries without a value
            if sub_value is not None:
                pairs.append((sub_key, sub_value))
    return pairs
python
{ "resource": "" }
q42025
TagCache.count
train
def count(cls, slug):
    """get the number of objects in the cache for a given slug

    :param slug: cache key
    :return: `int`
    """
    from .models import Content
    # serve the count from the in-memory cache when possible
    cached = cls._cache.get(slug)
    if cached is not None:
        return cached
    cached = Content.search_objects.search(tags=[slug]).count()
    cls._cache[slug] = cached
    return cached
python
{ "resource": "" }
q42026
Text.is_varchar
train
def is_varchar(self):
    """Determine if a data record is of the type VARCHAR.

    When self.data matches the configured varchar type and fits under its
    maximum length, sets self.type and self.len as a side effect and
    returns True; otherwise returns False (the original implicitly
    returned None on the false path, which is falsy but less explicit).
    """
    dt = DATA_TYPES['varchar']
    # strict `type(...) is` check kept from the original: subclasses of the
    # configured type are deliberately not treated as varchar
    if type(self.data) is dt['type'] and len(self.data) < dt['max']:
        self.type = 'VARCHAR'
        self.len = len(self.data)
        return True
    return False
python
{ "resource": "" }
q42027
Controls.bind_key_name
train
def bind_key_name(self, function, object_name):
    """Bind a key to an object name.

    Replaces self.name_map[function] with object_name when `function` is a
    known key; unknown functions are silently ignored (matching the
    original loop's behavior). The original scanned every dict entry to
    find the key — a direct membership test does the same in O(1).
    """
    if function in self.name_map:
        self.name_map[function] = object_name
python
{ "resource": "" }
q42028
Controls.configure_keys
train
def configure_keys(self):
    """Configure key map.

    Rebuilds the key -> bound-method dispatch table from self.key_map and
    resets the set of active functions.
    """
    self.active_functions = set()
    self.key2func = {bound_key: getattr(self, funcname)
                     for funcname, bound_key in self.key_map.items()}
python
{ "resource": "" }
q42029
_parse_module_list
train
def _parse_module_list(module_list):
    '''Loop through all the modules and parse them.'''
    for module_meta in module_list:
        # import each module by name, parse it, and stash the rendered
        # output on the meta dict under 'content'
        module = import_module(module_meta['module'])
        module_meta['content'] = parse_module(module)
python
{ "resource": "" }
q42030
_build_module_list
train
def _build_module_list(source_module, index_filename, ignore_modules):
    '''Builds a list of python modules in the current directory.

    source_module -- dotted module prefix, or '.' for the current directory
    index_filename -- base name used for the markdown generated from __init__.py
    ignore_modules -- module names to skip entirely

    Returns a list of dicts with 'directory', 'file', 'module', 'output'
    and 'source' keys for each discovered .py file.
    '''
    out = []
    dirs_with_init = set()
    module_prefix = '' if source_module == '.' else source_module
    for root, _, filenames in walk('.'):
        # strip the leading './' produced by walk('.')
        root = root[2:]
        module_root = root.replace('/', '.')
        file_names = [filename[:-3] for filename in filenames if filename.endswith('.py')]
        for filename in file_names:
            if filename == '__init__':
                dirs_with_init.add(root)
                module_name = '.'.join([module_prefix, module_root]) if module_root else source_module
            elif not root:
                module_name = '.'.join([module_prefix, filename])
            else:
                module_name = '.'.join([module_prefix, root.replace('/', '.'), filename])
            # drop a leading '.' left over by an empty prefix
            if module_name.startswith('.'):
                module_name = module_name[1:]
            if module_name in ignore_modules:
                print 'Ignored file: {}{}.py'.format('{}/'.format(root), filename)
                continue
            # NOTE(review): relies on walk visiting a package's __init__.py
            # before its sibling modules in the same directory — confirm for
            # the target platform's directory ordering
            if root and root not in dirs_with_init:
                print 'No __init__.py, skipping: {}{}.py'.format('{}/'.format(root), filename)
                continue
            source_name = '{}.py'.format(filename)
            if root:
                source_name = '{}/{}'.format(root, source_name)
            if filename == '__init__':
                output_name = '{}.md'.format(index_filename)
            else:
                output_name = '{}.md'.format(filename)
            if root:
                output_name = '{}/{}'.format(root, output_name)
            out.append({
                'directory': root,
                'file': filename,
                'module': module_name,
                'output': output_name,
                'source': source_name
            })
    return out
python
{ "resource": "" }
q42031
_write_docs
train
def _write_docs(module_list, output_dir): '''Write the document meta to our output location.''' for module_meta in module_list: directory = module_meta['directory'] # Ensure target directory if directory and not path.isdir(directory): makedirs(directory) # Write the file file = open(module_meta['output'], 'w') file.write(module_meta['content']) file.close()
python
{ "resource": "" }
q42032
main
train
def main():
    '''Main in a function in case you place a build.py for pydocs inside the root directory.

    Parses the command line with docopt and delegates to build().
    '''
    # NOTE(review): the usage string layout below was reconstructed from a
    # whitespace-collapsed source; docopt parses this text at runtime, so
    # verify the exact indentation against the original file.
    options = '''pydocs

Usage:
    pydocs SOURCE OUTPUT_DIR
    pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES]
    pydocs --help

Options:
    SOURCE                Source module, or . for current directory.
    OUTPUT_DIR            The location to output the generated markdown.
    --json                Dump meta in JSON format upon completion.
    --index NAME          Name of the index file (default index.md) to generate.
    --ignore FILE,NAMES   Comma separated modules to ignore/skip.
    -h --help             Show this screen.
    --version             Show version.
'''
    args = docopt(options)
    build(
        getcwd(),
        args['SOURCE'],
        args['OUTPUT_DIR'],
        json_dump=args['--json'],
        ignore_modules=args['--ignore'],
        index_filename=args['--index'] or 'index'
    )
python
{ "resource": "" }
q42033
IxnStatisticsView.read_stats
train
def read_stats(self):
    """ Reads the statistics view from IXN and saves it in statistics dictionary. """
    captions, rows = self._get_pages()
    # the name column becomes the row key, so drop it from the captions
    name_index = captions.index(self.name_caption)
    del captions[name_index]
    self.captions = captions
    self.statistics = OrderedDict()
    for row in rows:
        row_name = row.pop(name_index)
        self.statistics[row_name] = row
python
{ "resource": "" }
q42034
check_cmake_exists
train
def check_cmake_exists(cmake_command):
    """
    Check whether CMake is installed. If not, print
    informative error message and quits.
    """
    from subprocess import Popen, PIPE

    proc = Popen(
        '{0} --version'.format(cmake_command),
        shell=True,
        stdin=PIPE,
        stdout=PIPE)
    version_banner = proc.communicate()[0].decode('UTF-8')
    # a working cmake prints "cmake version X.Y.Z" on the first line
    if 'cmake version' in version_banner:
        return
    sys.stderr.write(' This code is built using CMake\n\n')
    sys.stderr.write(' CMake is not found\n')
    sys.stderr.write(' get CMake at http://www.cmake.org/\n')
    sys.stderr.write(' on many clusters CMake is installed\n')
    sys.stderr.write(' but you have to load it first:\n')
    sys.stderr.write(' $ module load cmake\n')
    sys.exit(1)
python
{ "resource": "" }
q42035
setup_build_path
train
def setup_build_path(build_path):
    """
    Create build directory. If this already exists and was configured
    before (contains CMakeCache.txt), print informative error message
    and quit.
    """
    if not os.path.isdir(build_path):
        os.makedirs(build_path, 0o755)
        return
    # an existing directory is only fatal when CMake already ran in it
    cache_file = os.path.join(build_path, 'CMakeCache.txt')
    if os.path.exists(cache_file):
        sys.stderr.write('aborting setup\n')
        sys.stderr.write(
            'build directory {0} which contains CMakeCache.txt already exists\n'.
            format(build_path))
        sys.stderr.write(
            'remove the build directory and then rerun setup\n')
        sys.exit(1)
python
{ "resource": "" }
q42036
run_cmake
train
def run_cmake(command, build_path, default_build_path):
    """ Execute CMake command.

    command -- full cmake command line to run through the shell
    build_path -- directory the configuration targets
    default_build_path -- the implicit build dir; only this one is removed
                          on failure (a user-chosen path is never deleted)
    """
    from subprocess import Popen, PIPE
    from shutil import rmtree

    topdir = os.getcwd()
    p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout_coded, stderr_coded = p.communicate()
    stdout = stdout_coded.decode('UTF-8')
    stderr = stderr_coded.decode('UTF-8')

    # print cmake output to screen
    print(stdout)

    if stderr:
        # we write out stderr but we do not stop yet
        # this is because CMake warnings are sent to stderr
        # and they might be benign
        sys.stderr.write(stderr)

    # write cmake output to file
    with open(os.path.join(build_path, 'cmake_output'), 'w') as f:
        f.write(stdout)

    # change directory and return
    os.chdir(topdir)

    # to figure out whether configuration was a success
    # we check for 3 sentences that should be part of stdout
    configuring_done = '-- Configuring done' in stdout
    generating_done = '-- Generating done' in stdout
    build_files_written = '-- Build files have been written to' in stdout
    configuration_successful = configuring_done and generating_done and build_files_written

    if configuration_successful:
        save_setup_command(sys.argv, build_path)
        print_build_help(build_path, default_build_path)
    else:
        if (build_path == default_build_path):
            # remove build_path iff not set by the user
            # otherwise removal can be dangerous
            rmtree(default_build_path)
python
{ "resource": "" }
q42037
print_build_help
train
def print_build_help(build_path, default_build_path):
    """
    Print help text after configuration step is done.
    """
    print(' configure step is done')
    print(' now you need to compile the sources:')
    # print the short name when the default build dir is in use
    cd_target = 'build' if build_path == default_build_path else build_path
    print(' $ cd ' + cd_target)
    print(' $ make')
python
{ "resource": "" }
q42038
save_setup_command
train
def save_setup_command(argv, build_path):
    """
    Save setup command to a file so the configuration can be reproduced.
    """
    setup_file = os.path.join(build_path, 'setup_command')
    with open(setup_file, 'w') as f:
        f.write(' '.join(argv) + '\n')
python
{ "resource": "" }
q42039
configure
train
def configure(root_directory, build_path, cmake_command, only_show):
    """
    Main configure function: validates CMake, prepares the build
    directory, prints the command, and (unless only_show) runs it.
    """
    default_build_path = os.path.join(root_directory, 'build')

    # check that CMake is available, if not stop
    check_cmake_exists('cmake')

    # fall back to the default build directory when none was given
    if build_path is None:
        build_path = default_build_path

    # only create the build directory when we will actually configure
    if not only_show:
        setup_build_path(build_path)

    cmake_command += ' -B' + build_path
    print('{0}\n'.format(cmake_command))

    if only_show:
        sys.exit(0)

    run_cmake(cmake_command, build_path, default_build_path)
python
{ "resource": "" }
q42040
PollSessionsAPI.create_single_poll_session
train
def create_single_poll_session(self, poll_id, poll_sessions_course_id, poll_sessions_course_section_id=None, poll_sessions_has_public_results=None):
    """
    Create a single poll session.

    Create a new poll session for this poll
    """
    params = {}

    # REQUIRED - PATH - poll_id: ID
    path = {"poll_id": poll_id}

    # REQUIRED - poll_sessions[course_id]:
    # the id of the course this session is associated with
    data = {"poll_sessions[course_id]": poll_sessions_course_id}

    # OPTIONAL - poll_sessions[course_section_id]:
    # the id of the course section this session is associated with
    if poll_sessions_course_section_id is not None:
        data["poll_sessions[course_section_id]"] = poll_sessions_course_section_id

    # OPTIONAL - poll_sessions[has_public_results]:
    # whether or not results are viewable by students
    if poll_sessions_has_public_results is not None:
        data["poll_sessions[has_public_results]"] = poll_sessions_has_public_results

    self.logger.debug("POST /api/v1/polls/{poll_id}/poll_sessions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_sessions".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42041
MercurialInProcManager._invoke
train
def _invoke(self, *params):
    """Run the self.exe command in-process with the supplied params."""
    # Build an hg-style argv: executable, repository flag, then params.
    cmd = [self.exe, '-R', self.location]
    cmd.extend(params)
    with reentry.in_process_context(cmd) as result:
        sys.modules['mercurial.dispatch'].run()
    out = result.stdio.stdout.getvalue()
    err = result.stdio.stderr.getvalue()
    # Non-zero exit: surface whichever stream carries a message.
    if result.returncode != 0:
        raise RuntimeError(err.strip() or out.strip())
    return out.decode('utf-8')
python
{ "resource": "" }
q42042
RabaPupa.getDctDescription
train
def getDctDescription(self) :
    "returns a dict describing the object"
    description = {'type' : RabaFields.RABA_FIELD_TYPE_IS_RABA_OBJECT}
    description['className'] = self._rabaClass.__name__
    description['raba_id'] = self.raba_id
    description['raba_namespace'] = self._raba_namespace
    return description
python
{ "resource": "" }
q42043
Raba.dropIndex
train
def dropIndex(cls, fields) :
    "removes an index created with ensureIndex "
    connection = RabaConnection(cls._raba_namespace)
    raba_list_fields, flat_fields = cls._parseIndex(fields)
    # RabaList indexes are anchored on the owner's raba_id column.
    for table_name in raba_list_fields :
        connection.dropIndex(table_name, 'anchor_raba_id')
    connection.dropIndex(cls.__name__, flat_fields)
    connection.commit()
python
{ "resource": "" }
q42044
Raba.getIndexes
train
def getIndexes(cls) :
    "returns a list of the indexes of a class"
    connection = RabaConnection(cls._raba_namespace)
    result = []
    for idx in connection.getIndexes(rabaOnly = True) :
        table = idx[2]
        if table == cls.__name__ :
            result.append(idx)
            continue
        # Also keep indexes that live on one of this class' RabaList tables.
        for field in cls.columns :
            if RabaFields.isRabaListField(getattr(cls, field)) and table == connection.makeRabaListTableName(cls.__name__, field) :
                result.append(idx)
    return result
python
{ "resource": "" }
q42045
Raba.flushIndexes
train
def flushIndexes(cls) :
    "drops all indexes for a class"
    connection = RabaConnection(cls._raba_namespace)
    # Index name sits at position 1 of each index tuple.
    for index in cls.getIndexes() :
        connection.dropIndexByName(index[1])
python
{ "resource": "" }
q42046
Raba.getFields
train
def getFields(cls) :
    """returns a set of the available fields. In order to be able to securely
    loop over the fields, "raba_id" and "json" are not included in the set"""
    # Set difference instead of remove(): does not raise KeyError if a
    # bookkeeping column is ever absent, and avoids mutating in place.
    return set(cls.columns) - {'json', 'raba_id'}
python
{ "resource": "" }
q42047
RabaListPupa._attachToObject
train
def _attachToObject(self, anchorObj, relationName) : "dummy fct for compatibility reasons, a RabaListPupa is attached by default" #MutableSequence.__getattribute__(self, "develop")() self.develop() self._attachToObject(anchorObj, relationName)
python
{ "resource": "" }
q42048
RabaList.pupatizeElements
train
def pupatizeElements(self) :
    """Transform all raba object into pupas"""
    # Replace each element in place with its pupa form.
    for position, element in enumerate(self) :
        self[position] = element.pupa()
python
{ "resource": "" }
q42049
RabaList._attachToObject
train
def _attachToObject(self, anchorObj, relationName) :
    "Attaches the rabalist to a raba object. Only attached rabalists can be saved"
    # First attachment: record the anchor, derive namespace/connection and
    # the backing table name, then validate the current elements.
    if self.anchorObj == None :
        self.relationName = relationName
        self.anchorObj = anchorObj
        self._setNamespaceConAndConf(anchorObj._rabaClass._raba_namespace)
        self.tableName = self.connection.makeRabaListTableName(self.anchorObj._rabaClass.__name__, self.relationName)
        # Constraints are only re-checked here, at first attachment; lists
        # mutated afterwards are presumably validated elsewhere — TODO confirm.
        faultyElmt = self._checkSelf()
        if faultyElmt != None :
            raise ValueError("Element %s violates specified list or relation constraints" % faultyElmt)
    # Re-attaching to a *different* anchor is forbidden; re-attaching to the
    # same anchor is silently ignored.
    elif self.anchorObj is not anchorObj :
        raise ValueError("Ouch: attempt to steal rabalist, use RabaLict.copy() instead.\nthief: %s\nvictim: %s\nlist: %s" % (anchorObj, self.anchorObj, self))
python
{ "resource": "" }
q42050
FilesAPI.list_files_courses
train
def list_files_courses(self, course_id, content_types=None, include=None, only=None, order=None, search_term=None, sort=None):
    """List files.

    Returns the paginated list of files for the folder or course.
    """
    path = {"course_id": course_id}
    data = {}
    params = {}

    # Validate the enum-restricted arguments before building the query.
    if include is not None:
        self._validate_enum(include, ["user"])
    if sort is not None:
        self._validate_enum(sort, ["name", "size", "created_at", "updated_at", "content_type", "user"])
    if order is not None:
        self._validate_enum(order, ["asc", "desc"])

    # Optional query parameters, in documented order; None means "omit".
    optional_params = (
        ("content_types", content_types),
        ("search_term", search_term),
        ("include", include),
        ("only", only),
        ("sort", sort),
        ("order", order),
    )
    params.update((key, value) for key, value in optional_params if value is not None)

    self.logger.debug("GET /api/v1/courses/{course_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/files".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42051
FilesAPI.get_public_inline_preview_url
train
def get_public_inline_preview_url(self, id, submission_id=None):
    """Get public inline preview url.

    Determine the URL that should be used for inline preview of the file.
    """
    path = {"id": id}
    data = {}
    params = {}

    # submission_id grants access to files attached to an assignment
    # submission (the server verifies ownership and viewing rights).
    if submission_id is not None:
        params["submission_id"] = submission_id

    self.logger.debug("GET /api/v1/files/{id}/public_url with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/files/{id}/public_url".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42052
FilesAPI.get_file_courses
train
def get_file_courses(self, id, course_id, include=None):
    """Get file.

    Returns the standard attachment json object
    """
    path = {"course_id": course_id, "id": id}
    data = {}
    params = {}

    # "include" may request extra blobs such as the uploading user.
    if include is not None:
        self._validate_enum(include, ["user"])
        params["include"] = include

    self.logger.debug("GET /api/v1/courses/{course_id}/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/files/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42053
FilesAPI.update_file
train
def update_file(self, id, hidden=None, lock_at=None, locked=None, name=None, on_duplicate=None, parent_folder_id=None, unlock_at=None):
    """Update file.

    Update some settings on the specified file
    """
    path = {"id": id}
    params = {}
    data = {}

    # Only validate the duplicate-handling strategy when one was given.
    if on_duplicate is not None:
        self._validate_enum(on_duplicate, ["overwrite", "rename"])

    # Form fields, in documented order; unset arguments are simply
    # omitted from the request body.
    optional_fields = (
        ("name", name),
        ("parent_folder_id", parent_folder_id),
        ("on_duplicate", on_duplicate),
        ("lock_at", lock_at),
        ("unlock_at", unlock_at),
        ("locked", locked),
        ("hidden", hidden),
    )
    data.update((key, value) for key, value in optional_fields if value is not None)

    self.logger.debug("PUT /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/files/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42054
FilesAPI.update_folder
train
def update_folder(self, id, hidden=None, lock_at=None, locked=None, name=None, parent_folder_id=None, position=None, unlock_at=None):
    """Update folder.

    Updates a folder
    """
    path = {"id": id}
    params = {}
    data = {}

    # Form fields in documented order; None means "leave unchanged".
    optional_fields = (
        ("name", name),
        ("parent_folder_id", parent_folder_id),
        ("lock_at", lock_at),
        ("unlock_at", unlock_at),
        ("locked", locked),
        ("hidden", hidden),
        ("position", position),
    )
    data.update((key, value) for key, value in optional_fields if value is not None)

    self.logger.debug("PUT /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/folders/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42055
FilesAPI.create_folder_courses
train
def create_folder_courses(self, name, course_id, hidden=None, lock_at=None, locked=None, parent_folder_id=None, parent_folder_path=None, position=None, unlock_at=None):
    """Create folder.

    Creates a folder in the specified context
    """
    path = {"course_id": course_id}
    params = {}

    # "name" is the only required form field.
    data = {"name": name}

    # Note: parent_folder_id and parent_folder_path are mutually exclusive;
    # the server rejects a request carrying both.
    optional_fields = (
        ("parent_folder_id", parent_folder_id),
        ("parent_folder_path", parent_folder_path),
        ("lock_at", lock_at),
        ("unlock_at", unlock_at),
        ("locked", locked),
        ("hidden", hidden),
        ("position", position),
    )
    data.update((key, value) for key, value in optional_fields if value is not None)

    self.logger.debug("POST /api/v1/courses/{course_id}/folders with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/folders".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42056
FilesAPI.delete_folder
train
def delete_folder(self, id, force=None):
    """Delete folder.

    Remove the specified folder. You can only delete empty folders
    unless you set the 'force' flag
    """
    path = {"id": id}
    data = {}
    params = {}

    # 'force' permits deleting a non-empty folder.
    if force is not None:
        params["force"] = force

    self.logger.debug("DELETE /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/folders/{id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42057
FilesAPI.set_usage_rights_courses
train
def set_usage_rights_courses(self, file_ids, course_id, usage_rights_use_justification, folder_ids=None, publish=None, usage_rights_legal_copyright=None, usage_rights_license=None):
    """Set usage rights.

    Sets copyright and license information for one or more files
    """
    path = {"course_id": course_id}
    params = {}

    # Required form field.
    data = {"file_ids": file_ids}

    # New files uploaded to these folders do not inherit the rights.
    if folder_ids is not None:
        data["folder_ids"] = folder_ids
    if publish is not None:
        data["publish"] = publish

    # The justification is required and restricted to a known set.
    self._validate_enum(usage_rights_use_justification, ["own_copyright", "used_by_permission", "fair_use", "public_domain", "creative_commons"])
    data["usage_rights[use_justification]"] = usage_rights_use_justification

    if usage_rights_legal_copyright is not None:
        data["usage_rights[legal_copyright]"] = usage_rights_legal_copyright
    if usage_rights_license is not None:
        data["usage_rights[license]"] = usage_rights_license

    self.logger.debug("PUT /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42058
import_class
train
def import_class(name):
    """Load class from fully-qualified python module name.

    ex: import_class('bulbs.content.models.Content')
    """
    # Split "pkg.module.Class" into module path and class name.
    module_path, _, class_name = name.rpartition('.')
    return getattr(import_module(module_path), class_name)
python
{ "resource": "" }
q42059
RepoManager.get_valid_managers
train
def get_valid_managers(cls, location):
    """
    Get the valid RepoManagers for this location.
    """
    # Highest-priority subclasses first; a missing priority counts as 0.
    classes = sorted(
        iter_subclasses(cls),
        key=lambda c: getattr(c, 'priority', 0),
        reverse=True,
    )
    candidates = (subclass(location) for subclass in classes)
    return (manager for manager in candidates if manager.is_valid())
python
{ "resource": "" }
q42060
RepoManager.find_all_files
train
def find_all_files(self):
    """
    Find files including those in subrepositories.
    """
    own_files = self.find_files()
    # Prefix each subrepo file with its subrepo's location.
    nested = (
        posixpath.join(sub.location, filename)
        for sub in self.subrepos()
        for filename in sub.find_files()
    )
    return itertools.chain(own_files, nested)
python
{ "resource": "" }
q42061
get_cmd_out
train
def get_cmd_out(command):
    '''Get the output of a command.

    Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a
    given command.

    Args:
        command (str or list): A string of the command, or a list of the
            arguments (as would be used in :class:`subprocess.Popen`).

    Note:
        If ``command`` is a ``str``, it will be evaluated with
        ``shell=True`` i.e. in the default shell (for example, bash).

    Returns:
        str: The ``stdout`` of the command.'''
    # A plain string must go through the shell; an argv list must not.
    use_shell = not isinstance(command, list)
    raw = sp.check_output(command, shell=use_shell)
    return raw.decode('utf-8').rstrip()
python
{ "resource": "" }
q42062
get_name
train
def get_name():
    '''Get desktop environment or OS.

    Get the OS name or desktop environment.

    **List of Possible Values**

    +-------------------------+---------------+
    | Windows                 | windows       |
    | Mac OS X                | mac           |
    | GNOME 3+                | gnome         |
    | GNOME 2                 | gnome2        |
    | XFCE                    | xfce4         |
    | KDE                     | kde           |
    | Unity                   | unity         |
    | LXDE                    | lxde          |
    | i3wm                    | i3            |
    | \\*box                   | \\*box         |
    | Trinity (KDE 3 fork)    | trinity       |
    | MATE                    | mate          |
    | IceWM                   | icewm         |
    | Pantheon (elementaryOS) | pantheon      |
    | LXQt                    | lxqt          |
    | Awesome WM              | awesome       |
    | Enlightenment           | enlightenment |
    | AfterStep               | afterstep     |
    | WindowMaker             | windowmaker   |
    | [Other]                 | unknown       |
    +-------------------------+---------------+

    Returns:
        str: The name of the desktop environment or OS.
    '''
    # Windows and macOS are identified directly from the platform string.
    if sys.platform in ['win32', 'cygwin']:
        return 'windows'
    elif sys.platform == 'darwin':
        return 'mac'
    else:
        # Prefer the XDG variable; DESKTOP_SESSION is a legacy fallback.
        desktop_session = os.environ.get(
            'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION')
        if desktop_session is not None:
            desktop_session = desktop_session.lower()
            # Fix for X-Cinnamon etc
            if desktop_session.startswith('x-'):
                desktop_session = desktop_session.replace('x-', '')
            if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate', 'xfce4', 'lxde', 'fluxbox', 'blackbox', 'openbox', 'icewm', 'jwm', 'afterstep', 'trinity', 'kde', 'pantheon', 'i3', 'lxqt', 'awesome', 'enlightenment']:
                return desktop_session
            #-- Special cases --#
            # Canonical sets environment var to Lubuntu rather than
            # LXDE if using LXDE.
            # There is no guarantee that they will not do the same
            # with the other desktop environments.
            elif 'xfce' in desktop_session:
                return 'xfce4'
            elif desktop_session.startswith('ubuntu'):
                return 'unity'
            elif desktop_session.startswith('xubuntu'):
                return 'xfce4'
            elif desktop_session.startswith('lubuntu'):
                return 'lxde'
            elif desktop_session.startswith('kubuntu'):
                return 'kde'
            elif desktop_session.startswith('razor'):
                return 'razor-qt'
            elif desktop_session.startswith('wmaker'):
                return 'windowmaker'
        # Session variables were inconclusive; fall back to older markers
        # and to probing for well-known session daemons.
        if os.environ.get('KDE_FULL_SESSION') == 'true':
            return 'kde'
        elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
            if not 'deprecated' in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
                return 'gnome2'
        # NOTE(review): 'xfce-mcs-manage' looks truncated; the XFCE settings
        # daemon is normally 'xfce-mcs-manager' — confirm before relying on it.
        elif is_running('xfce-mcs-manage'):
            return 'xfce4'
        elif is_running('ksmserver'):
            return 'kde'
        return 'unknown'
python
{ "resource": "" }
q42063
is_in_path
train
def is_in_path(program):
    '''
    Check if a program is in the system ``PATH``.

    Checks if a given program is in the user's ``PATH`` or not.

    Args:
        program (str): The program to try to find in ``PATH``.

    Returns:
        bool: Is the program in ``PATH``?
    '''
    if sys.version_info.major == 2:
        # Python 2 has no os.get_exec_path(); split PATH manually.
        path = os.getenv('PATH')
        if os.name == 'nt':
            path = path.split(';')
        else:
            path = path.split(':')
    else:
        path = os.get_exec_path()

    for directory in path:
        if os.path.isdir(directory):
            if program in os.listdir(directory):
                return True
    # Fix: the original fell off the end and implicitly returned None;
    # make the documented bool contract explicit.
    return False
python
{ "resource": "" }
q42064
is_running
train
def is_running(process):
    '''
    Check if process is running.

    Check if the given process name is running or not.

    Note:
        On a Linux system, kernel threads (like ``kthreadd`` etc.)
        are excluded.

    Args:
        process (str): The name of the process.

    Returns:
        bool: Is the process running?
    '''
    if os.name == 'nt':
        process_list = get_cmd_out(['tasklist', '/v'])
        return process in process_list
    else:
        process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
        for entry in process_list.split('\n'):
            # Fix: the original wrote `not i == 'COMMAND' or i.startswith('[')`,
            # which parses as `(not ...) or ...` — so the 'COMMAND' header and
            # [kernel] threads were NOT excluded as documented. Skip them
            # explicitly instead.
            if entry == 'COMMAND' or entry.startswith('['):
                continue
            if entry == process:
                return True
            elif os.path.basename(entry) == process:
                # check entry without executable path:
                # for example, if 'process' argument is 'sshd'
                # and '/usr/bin/sshd' is listed in ps, return True
                return True
        return False
python
{ "resource": "" }
q42065
parse_datetime
train
def parse_datetime(value):
    """Returns a datetime object for a given argument

    This helps to convert strings, dates and datetimes to proper
    tz-enabled datetime objects."""

    def _as_utc(dt):
        # Fix: datetime.replace() returns a NEW object; the original call
        # discarded the result, so the timezone was never actually attached.
        # Naive values are assumed to be UTC; already-aware values are kept
        # untouched to avoid silently stomping their timezone.
        if dt.tzinfo is None:
            return dt.replace(tzinfo=dateutil.tz.tzutc())
        return dt

    if isinstance(value, (string_types, text_type, binary_type)):
        return _as_utc(dateutil.parser.parse(value))
    elif isinstance(value, datetime.datetime):
        return _as_utc(value)
    elif isinstance(value, datetime.date):
        # Plain dates become midnight UTC of that day.
        return _as_utc(datetime.datetime(value.year, value.month, value.day))
    else:
        raise ValueError('Value must be parsable to datetime object. Got `{}`'.format(type(value)))
python
{ "resource": "" }
q42066
NegateQueryFilter
train
def NegateQueryFilter(es_query):  # noqa
    """
    Return a filter removing the contents of the provided query.
    """
    query_dict = es_query.to_dict()
    # Walk query -> filtered -> filter, tolerating missing levels.
    inner_filter = (
        query_dict.get("query", {})
        .get("filtered", {})
        .get("filter", {})
    )
    return Not(**inner_filter)
python
{ "resource": "" }
q42067
Request.body_template
train
def body_template(self, value):
    """
    Must be an instance of a prestans.types.DataCollection subclass; this
    is generally set during the RequestHandler lifecycle. Setting this
    spawns the parsing process of the body.

    If the HTTP verb is GET an AssertionError is thrown. Use with
    extreme caution.
    """
    # Parsing a request body makes no sense for GET.
    if self.method == VERB.GET:
        raise AssertionError("body_template cannot be set for GET requests")

    # A None template simply disables body parsing.
    if value is None:
        self.logger.warning("body_template is None, parsing will be ignored")
        return

    if not isinstance(value, DataCollection):
        msg = "body_template must be an instance of %s.%s" % (
            DataCollection.__module__,
            DataCollection.__name__
        )
        raise AssertionError(msg)

    self._body_template = value

    # Choose a deserializer from the Content-Type header here, so the
    # handler has already had a chance to register extra serializers.
    self.set_deserializer_by_mime_type(self.content_type)
python
{ "resource": "" }
q42068
Request.get_response_attribute_filter
train
def get_response_attribute_filter(self, template_filter, template_model=None):
    """
    Prestans-Response-Attribute-List can contain a client's requested
    definition for attributes required in the response. This should match
    the response_attribute_filter_template.

    :param template_filter:
    :param template_model: the expected model that this filter corresponds to
    :return: the evaluated filter, or None when no template/header is present
    :rtype: None | AttributeFilter
    """
    # No template to evaluate against.
    if template_filter is None:
        return None

    # Header absent: the client did not request attribute filtering.
    if 'Prestans-Response-Attribute-List' not in self.headers:
        return None

    raw_header = self.headers['Prestans-Response-Attribute-List']

    # The header carries a JSON payload describing the wanted attributes.
    requested = deserializer.JSON().loads(raw_header)

    attribute_filter = AttributeFilter(
        from_dictionary=requested,
        template_model=template_model
    )

    # Validate against the template even when template_model was supplied,
    # in case a custom filter was provided.
    return attribute_filter.conforms_to_template_filter(template_filter)
python
{ "resource": "" }
q42069
Command.version
train
def version(self):
    """
    Return the underlying version
    """
    # Only the first line of the tool's output carries the version string.
    output_lines = self._invoke('version').splitlines()
    first_line = next(iter(output_lines)).strip()
    return self._parse_version(first_line)
python
{ "resource": "" }
q42070
Mercurial.find_files
train
def find_files(self):
    """
    Find versioned files in self.location
    """
    tracked = self._invoke('locate', '-I', '.').splitlines()
    # Paths come back relative to the repository root; strip the prefix
    # leading from the root down to self.location.
    prefix = os.path.relpath(self.location, self.find_root())
    return [os.path.relpath(name, prefix) for name in tracked]
python
{ "resource": "" }
q42071
Mercurial._get_rev_num
train
def _get_rev_num(self, rev=None): """ Determine the revision number for a given revision specifier. """ # first, determine the numeric ID cmd = ['identify', '--num'] # workaround for #4 cmd.extend(['--config', 'defaults.identify=']) if rev: cmd.extend(['--rev', rev]) res = self._invoke(*cmd) return res.strip()
python
{ "resource": "" }
q42072
Mercurial._get_tags_by_num
train
def _get_tags_by_num(self): """ Return a dictionary mapping revision number to tags for that number. """ by_revision = operator.attrgetter('revision') tags = sorted(self.get_tags(), key=by_revision) revision_tags = itertools.groupby(tags, key=by_revision) def get_id(rev): return rev.split(':', 1)[0] return dict( (get_id(rev), [tr.tag for tr in tr_list]) for rev, tr_list in revision_tags )
python
{ "resource": "" }
q42073
Git.get_tags
train
def get_tags(self, rev=None):
    """
    Return the tags for the current revision as a set
    """
    # Default to the current checkout when no revision is given.
    target = rev if rev else 'HEAD'
    output = self._invoke('tag', '--points-at', target)
    return set(output.splitlines())
python
{ "resource": "" }
q42074
Page.validate_template_name
train
def validate_template_name(self, key, value):
    """Validate template name.

    :param key: The template path.
    :param value: The template name.
    :raises ValueError: If template name is wrong.
    """
    # Only names registered in the application config are accepted.
    allowed = dict(current_app.config['PAGES_TEMPLATES'])
    if value not in allowed:
        raise ValueError(
            'Template "{0}" does not exist.'.format(value))
    return value
python
{ "resource": "" }
q42075
add_item
train
def add_item(name, command, system_wide=False):
    '''Adds a program to startup.

    Adds a program to user startup.

    Args:
        name (str) : The name of the startup entry.
        command (str) : The command to run.
        system_wide (bool): Add to system-wide startup.

    Note:
        ``system_wide`` requires superuser/admin privileges.
    '''
    desktop_env = system.get_name()

    # Fix: command_is_file was previously only bound when the command was a
    # file, raising NameError on Windows for plain command strings.
    command_is_file = os.path.isfile(command)
    if command_is_file and desktop_env != 'windows':
        # Will not exit program if insufficient permissions
        sp.Popen(['chmod +x %s' % command], shell=True)

    if desktop_env == 'windows':
        import winreg
        if system_wide:
            startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        else:
            startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        if not command_is_file:
            # Wrap a bare command string into a batch file.
            with open(os.path.join(startup_dir, name + '.bat'), 'w') as f:
                f.write(command)
        else:
            shutil.copy(command, startup_dir)
    elif desktop_env == 'mac':
        # Fix: the original applied % to the *list*, which raises TypeError;
        # format the command string instead.
        sp.Popen(['launchctl submit -l %s -- %s' % (name, command)], shell=True)
        # system-wide will be handled by running the above as root
        # which will auto-happen if current process is root.
    else:  # Linux/Unix
        if desktop_env == 'unknown':  # CLI
            login_file = '/etc/profile' if system_wide else os.path.expanduser('~/.profile')
            with open(login_file, 'a') as f:
                f.write(command)
        else:
            try:
                desktop_file_name = name + '.desktop'
                startup_file = os.path.join(get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name)
                # .desktop files' Terminal option uses an independent method
                # to find a terminal emulator.
                desktop_str = desktopfile.construct(name=name, exec_=command, additional_opts={'X-GNOME-Autostart-enabled': 'true'})
                with open(startup_file, 'w') as f:
                    f.write(desktop_str)
            except Exception:
                # Best-effort: silently skip when the autostart directory or
                # desktop-file helpers are unavailable (kept from original,
                # narrowed from a bare except).
                pass
python
{ "resource": "" }
q42076
remove_item
train
def remove_item(name, system_wide=False):
    '''Removes a program from startup.

    Removes a program from startup.

    Args:
        name (str) : The name of the program (as known to the system) to remove. See :func:``list_items``.
        system_wide (bool): Remove it from system-wide startup.

    Note:
        ``system_wide`` requires superuser/admin privileges.
    '''
    desktop_env = system.get_name()
    if desktop_env == 'windows':
        import winreg
        if system_wide:
            startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        else:
            startup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        # Fix: was `os.path.listdir(start_dir)` — a nonexistent function AND
        # a misspelled variable; both raised before anything was removed.
        for startup_file in os.listdir(startup_dir):
            if startup_file == name or startup_file.split('.')[0] == name:
                os.remove(os.path.join(startup_dir, startup_file))
    elif desktop_env == 'mac':
        sp.Popen(['launchctl', 'remove', name])
        # system-wide will be handled by running the above as root
        # which will auto-happen if current process is root.
    else:  # Linux/Unix
        if desktop_env == 'unknown':  # CLI
            login_file = '/etc/profile' if system_wide else os.path.expanduser('~/.profile')
            with open(login_file) as f:
                login_file_contents = f.read()
            # Fix: rebuild the file with newlines preserved; the original
            # concatenated the surviving lines without any separator,
            # mangling the whole login file.
            kept_lines = [line for line in login_file_contents.split('\n')
                          if line.split(' ')[0] != name]
            with open(login_file, 'w') as f:
                f.write('\n'.join(kept_lines))
        else:
            try:
                autostart_dir = directories.get_config_dir('autostart', system_wide=system_wide)[0]
                startup_file = os.path.join(autostart_dir, name + '.desktop')
                if not os.path.isfile(startup_file):
                    # Fall back to scanning entries whose Name= matches.
                    for candidate in os.listdir(autostart_dir):
                        candidate_path = os.path.join(autostart_dir, candidate)
                        # NOTE(review): the original parsed and removed the
                        # bare *filename*; using the full path is assumed to
                        # be the intent — confirm against desktopfile.parse.
                        if desktopfile.parse(candidate_path)['Name'] == name:
                            startup_file = candidate_path
                os.remove(startup_file)
            except IndexError:
                pass
python
{ "resource": "" }
q42077
AppEngineAuthContextProvider.get_current_user
train
def get_current_user(self):
    """
    Override get_current_user for Google AppEngine

    Checks for oauth capable request first, if this fails fall back
    to standard users API
    """
    from google.appengine.api import users

    # The development server has no OAuth environment; use the users API.
    if _IS_DEVELOPMENT_SERVER:
        return users.get_current_user()

    from google.appengine.api import oauth
    try:
        return oauth.get_current_user()
    except oauth.OAuthRequestError:
        # Not an OAuth request; fall back to cookie-based users API.
        return users.get_current_user()
python
{ "resource": "" }
q42078
Bam.et2roc
train
def et2roc(et_fo, roc_fo):
    """ET to ROC conversion.

    Aggregates per-read category intervals from the ET file into per-quality
    counters and writes them out as a run-length-compressed ROC table.

    Args:
        et_fo (file): File object for the ET file.
        roc_fo (file): File object for the ROC file.

    raises:
        ValueError
    """
    # One counter row per mapping-quality threshold q in [0, MAX].
    stats_dicts = [
        {
            "q": q,
            "M": 0,
            "w": 0,
            "m": 0,
            "P": 0,
            "U": 0,
            "u": 0,
            "T": 0,
            "t": 0,
            "x": 0
        } for q in range(rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1)
    ]

    for line in et_fo:
        line = line.strip()
        # Skip blank lines and '#' comment lines.
        if line != "" and line[0] != "#":
            (read_tuple_name, tab, info_categories) = line.partition("\t")
            intervals = info_categories.split(",")
            # Each interval is formatted "<category>:<left>-<right>"; bump the
            # category counter for every quality q in the closed interval.
            for interval in intervals:
                category = interval[0]
                (left, colon, right) = interval[2:].partition("-")
                for q in range(int(left), int(right) + 1):
                    stats_dicts[q][category] += 1

    # Header: legend for the category columns below.
    roc_fo.write("# Numbers of reads in several categories in dependence" + os.linesep)
    roc_fo.write("# on the applied threshold on mapping quality q" + os.linesep)
    roc_fo.write("# " + os.linesep)
    roc_fo.write("# Categories:" + os.linesep)
    roc_fo.write("#        M: Mapped correctly." + os.linesep)
    roc_fo.write("#        w: Mapped to a wrong position." + os.linesep)
    roc_fo.write("#        m: Mapped but should be unmapped." + os.linesep)
    roc_fo.write("#        P: Multimapped." + os.linesep)
    roc_fo.write("#        U: Unmapped and should be unmapped." + os.linesep)
    roc_fo.write("#        u: Unmapped but should be mapped." + os.linesep)
    roc_fo.write("#        T: Thresholded correctly." + os.linesep)
    roc_fo.write("#        t: Thresholded incorrectly." + os.linesep)
    roc_fo.write("#        x: Unknown." + os.linesep)
    roc_fo.write("#" + os.linesep)
    roc_fo.write("# q\tM\tw\tm\tP\tU\tu\tT\tt\tx\tall" + os.linesep)

    # Emit a row only when the counts differ from the previous q
    # (run-length compression of the ROC table).
    l_numbers = []
    for line in stats_dicts:
        numbers = [
            line["M"], line["w"], line["m"], line["P"], line["U"], line["u"], line["T"], line["t"], line["x"]
        ]
        if numbers != l_numbers:
            roc_fo.write("\t".join([str(line["q"])] + list(map(str, numbers)) + [str(sum(numbers))]) + os.linesep)
        l_numbers = numbers
python
{ "resource": "" }
q42079
Bam.create_roc
train
def create_roc(self):
    """Create a ROC file for this BAM file from the intermediate ET file.

    raises:
        ValueError
    """
    # The ET file may be gzip-compressed depending on configuration.
    if self.compress_intermediate_files:
        et_fo = gzip.open(self._et_fn, "tr")
    else:
        et_fo = open(self._et_fn, "r")
    with et_fo:
        with open(self._roc_fn, "w+") as roc_fo:
            self.et2roc(
                et_fo=et_fo,
                roc_fo=roc_fo,
            )
python
{ "resource": "" }
q42080
Bam.create_graphics
train
def create_graphics(self):
    """Create images related to this BAM file using GnuPlot."""
    rnftools.utils.shell('"{}" "{}"'.format("gnuplot", self._gp_fn))
    # Optionally render the SVG to PDF with the configured backend.
    if self.render_pdf_method is None:
        return
    svg42pdf(self._svg_fn, self._pdf_fn, method=self.render_pdf_method)
python
{ "resource": "" }
q42081
photparse
train
def photparse(tab):
    """
    Parse through a photometry table to group by source_id

    Parameters
    ----------
    tab: list
        SQL query dictionary list from running query_dict.execute()

    Returns
    -------
    newtab: list
        Dictionary list after parsing to group together sources

    Raises
    ------
    KeyError
        If the rows do not contain a ``source_id`` column.
    """
    # Check that source_id column is present.
    # BUG FIX: corrected "columb" typo in the error message.
    if 'source_id' not in tab[0].keys():
        raise KeyError('phot=TRUE requires the source_id column be included')

    # Collect unique source ids preserving first-seen order.
    # A seen-set makes this O(n) instead of the original O(n^2) list scan.
    uniqueid = []
    seen = set()
    for row in tab:
        tmpid = row['source_id']
        if tmpid not in seen:
            seen.add(tmpid)
            uniqueid.append(tmpid)

    # One combined row per unique source.
    return [photaddline(tab, sourceid) for sourceid in uniqueid]
python
{ "resource": "" }
q42082
user_order_by
train
def user_order_by(self, field): """ Queryset method ordering objects by user ordering field. """ # Get ordering model. model_label = order.utils.resolve_labels('.'.join(\ [self.model._meta.app_label, self.model._meta.object_name])) orderitem_set = getattr(self.model, \ order.utils.resolve_order_item_related_set_name(model_label)) order_model = orderitem_set.related.model # Resolve ordering model table name. db_table = order_model._meta.db_table # Add ordering field as extra queryset fields. pk_name = self.model._meta.pk.attname # If we have a descending query remove '-' from field name when quering. sanitized_field = field.lstrip('-') extra_select = { sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \ (sanitized_field, db_table, self.model._meta.db_table, pk_name) } # Use original field name when ordering to allow for descending. return self.extra(select=extra_select).all().order_by(field)
python
{ "resource": "" }
q42083
FeatureFlagsAPI.list_enabled_features_accounts
train
def list_enabled_features_accounts(self, account_id):
    """
    List enabled features.

    List all features that are enabled on a given Account, Course, or User.
    Only the feature names are returned.
    """
    # REQUIRED - PATH - account_id
    path = {"account_id": account_id}
    data = {}
    params = {}

    self.logger.debug("GET /api/v1/accounts/{account_id}/features/enabled with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/features/enabled".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42084
AppConfigViewMixin.get_nav_menu
train
def get_nav_menu(self):
    """Method to generate the menu.

    Builds the navigation menu: starts from the site-level menu, then adds
    one entry per registered model admin, grouped per app, skipping hidden
    admins and URLs already present in the site menu.
    """
    # Start from the configured site menu (copied so we can extend it).
    _menu = self.get_site_menu()
    if _menu:
        site_menu = list(_menu)
    else:
        site_menu = []
    had_urls = []

    # Recursively collect every URL already present in the site menu so
    # auto-generated model entries don't duplicate them.
    def get_url(menu, had_urls):
        if 'url' in menu:
            had_urls.append(menu['url'])
        if 'menus' in menu:
            for m in menu['menus']:
                get_url(m, had_urls)
    get_url({'menus': site_menu}, had_urls)

    # get base menu with apps already configurated
    nav_menu = collections.OrderedDict(self.get_apps_menu())

    for model, model_admin in self.admin_site._registry.items():
        if getattr(model_admin, 'hidden_menu', False):
            continue
        app_config = getattr(model_admin, 'app_config', None)
        app_label = app_config.name if app_config else model._meta.app_label
        model_dict = {
            'title': smart_text(capfirst(model._meta.verbose_name_plural)),
            'url': self.get_model_url(model, "changelist"),
            'icon': self.get_model_icon(model),
            'perm': self.get_model_perm(model, 'view'),
            'order': model_admin.order,
        }
        # Skip models whose changelist URL is already in the site menu.
        if model_dict['url'] in had_urls:
            continue
        app_key = "app:%s" % app_label
        if app_key in nav_menu:
            nav_menu[app_key]['menus'].append(model_dict)
        else:
            # first time the app is seen
            # Find app title
            if app_config:
                app_title = model_admin.app_config.verbose_name
            else:
                app_title = smart_text(app_label.title())
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    # Fall back to a verbose_name/app_title attribute
                    # exported by the model's package module, if any.
                    mods = model.__module__.split('.')
                    if len(mods) > 1:
                        mod = '.'.join(mods[0:-1])
                        if mod in sys.modules:
                            mod = sys.modules[mod]
                            if 'verbose_name' in dir(mod):
                                app_title = getattr(mod, 'verbose_name')
                            elif 'app_title' in dir(mod):
                                app_title = getattr(mod, 'app_title')
            nav_menu[app_key] = {
                'title': app_title,
                'menus': [model_dict],
            }

        # Track the first icon/url of the app group for display purposes.
        app_menu = nav_menu[app_key]
        if ('first_icon' not in app_menu or
                app_menu['first_icon'] == self.default_model_icon) and model_dict.get('icon'):
            app_menu['first_icon'] = model_dict['icon']
        if 'first_url' not in app_menu and model_dict.get('url'):
            app_menu['first_url'] = model_dict['url']

    # after app menu is done, join it to the site menu
    nav_menu = nav_menu.values()
    site_menu.extend(nav_menu)

    # Sort entries inside each group, then the groups themselves by title.
    for menu in site_menu:
        menu['menus'].sort(key=sortkeypicker(['order', 'title']))
    site_menu.sort(key=lambda x: x['title'])
    return site_menu
python
{ "resource": "" }
q42085
OutcomeResultsAPI.get_outcome_results
train
def get_outcome_results(self, course_id, include=None, outcome_ids=None, user_ids=None):
    """
    Get outcome results.

    Gets the outcome results for users and outcomes in the specified context.

    Optional filters (``user_ids``, ``outcome_ids``) and side-loaded
    collections (``include``) are attached as query parameters only when
    supplied by the caller.
    """
    # REQUIRED - PATH - course_id
    path = {"course_id": course_id}
    data = {}
    params = {}

    # OPTIONAL query parameters.
    if user_ids is not None:
        params["user_ids"] = user_ids
    if outcome_ids is not None:
        params["outcome_ids"] = outcome_ids
    if include is not None:
        params["include"] = include

    self.logger.debug("GET /api/v1/courses/{course_id}/outcome_results with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/outcome_results".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42086
OutcomeResultsAPI.get_outcome_result_rollups
train
def get_outcome_result_rollups(self, course_id, aggregate=None, include=None, outcome_ids=None, user_ids=None):
    """
    Get outcome result rollups.

    Gets the outcome rollups for the users and outcomes in the specified
    context. When ``aggregate`` is "course", all user rollups are combined
    into one course-level rollup with average scores.
    """
    # REQUIRED - PATH - course_id
    path = {"course_id": course_id}
    data = {}
    params = {}

    # OPTIONAL - aggregate ("course" is the only accepted value).
    if aggregate is not None:
        self._validate_enum(aggregate, ["course"])
        params["aggregate"] = aggregate
    # OPTIONAL filters and side-loaded collections.
    if user_ids is not None:
        params["user_ids"] = user_ids
    if outcome_ids is not None:
        params["outcome_ids"] = outcome_ids
    if include is not None:
        params["include"] = include

    self.logger.debug("GET /api/v1/courses/{course_id}/outcome_rollups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/outcome_rollups".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42087
ipmitool._subprocess_method
train
def _subprocess_method(self, command): """Use the subprocess module to execute ipmitool commands and and set status """ p = subprocess.Popen([self._ipmitool_path] + self.args + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.output, self.error = p.communicate() self.status = p.returncode
python
{ "resource": "" }
q42088
ipmitool._expect_method
train
def _expect_method(self, command):
    """Use the expect module to execute ipmitool commands and set status.

    Spawns ipmitool, answers its interactive password prompt with
    ``self.password``, then records the captured text on ``self.output`` or
    ``self.error`` and the exit code on ``self.status``.
    """
    child = pexpect.spawn(self._ipmitool_path, self.args + command)
    # Wait up to 10s for the interactive "Password: " prompt.
    i = child.expect([pexpect.TIMEOUT, 'Password: '], timeout=10)
    if i == 0:
        # No prompt appeared in time: kill the child and flag a timeout.
        child.terminate()
        self.error = 'ipmitool command timed out'
        self.status = 1
    else:
        child.sendline(self.password)
        # Wait for the command to finish (EOF) after authentication.
        i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if i == 0:
            child.terminate()
            self.error = 'ipmitool command timed out'
            self.status = 1
        else:
            # child.before holds everything printed before EOF; a nonzero
            # exitstatus is treated as an error, otherwise as output.
            # NOTE(review): exitstatus is read before child.close() --
            # pexpect may only populate it after close; verify.
            if child.exitstatus:
                self.error = child.before
            else:
                self.output = child.before
            self.status = child.exitstatus
            child.close()
python
{ "resource": "" }
q42089
ipmitool._get_ipmitool_path
train
def _get_ipmitool_path(self, cmd='ipmitool'): """Get full path to the ipmitool command using the unix `which` command """ p = subprocess.Popen(["which", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return out.strip()
python
{ "resource": "" }
q42090
Remove.truncate
train
def truncate(self, table):
    """Empty one or more tables by deleting all of their rows.

    ``table`` may be a single table name or a list/set/tuple of names.
    """
    # Normalize to an iterable so a single name and a collection share
    # the same code path.
    tables = table if isinstance(table, (list, set, tuple)) else [table]
    for name in tables:
        self._truncate(name)
python
{ "resource": "" }
q42091
Remove._truncate
train
def _truncate(self, table):
    """
    Remove all records from a table in MySQL.

    It performs the same function as a DELETE statement without a WHERE
    clause.
    """
    self.execute("TRUNCATE TABLE {0}".format(wrap(table)))
    self._printer('\tTruncated table {0}'.format(table))
python
{ "resource": "" }
q42092
Remove.truncate_database
train
def truncate_database(self, database=None):
    """Drop all tables in a database.

    :param database: Optional database name to switch to first.
    :return: List of the table names that were dropped.
    """
    # Switch to the requested database if it exists and differs from the
    # current one.  BUG FIX: the original used ``is not`` (object identity),
    # which is unreliable for comparing strings; use ``!=``.
    if database in self.databases and database != self.database:
        self.change_db(database)

    # Get list of tables (normalize a single name to a list).
    tables = self.tables if isinstance(self.tables, list) else [self.tables]
    if tables:
        self.drop(tables)
        self._printer('\t' + str(len(tables)), 'tables truncated from', database)
    return tables
python
{ "resource": "" }
q42093
Remove.drop
train
def drop(self, table):
    """
    Drop one or more tables from the database.

    Accepts either a string representing a table name or a list of strings
    representing table names.  Returns the argument unchanged.
    """
    # Snapshot the existing tables once so every _drop call compares
    # against the same view of the schema.
    existing = self.tables
    targets = table if isinstance(table, (list, set, tuple)) else [table]
    for name in targets:
        self._drop(name, existing)
    return table
python
{ "resource": "" }
q42094
Remove._drop
train
def _drop(self, table, existing_tables=None):
    """Private method for executing table drop commands.

    Does nothing when the table does not exist.  Foreign-key checks are
    disabled around the drop so tables referenced by constraints can still
    be removed, then re-enabled.
    """
    # Retrieve list of existing tables for comparison if not supplied.
    known = existing_tables if existing_tables else self.tables
    # Only drop table if it exists.
    if table not in known:
        return
    self.execute('SET FOREIGN_KEY_CHECKS = 0')
    self.execute('DROP TABLE {0}'.format(wrap(table)))
    self.execute('SET FOREIGN_KEY_CHECKS = 1')
    self._printer('\tDropped table {0}'.format(table))
python
{ "resource": "" }
q42095
Remove.drop_empty_tables
train
def drop_empty_tables(self):
    """Drop all empty tables in a database.

    :return: List of the table names that were dropped.
    """
    # Map table name -> row count, keep only the empty ones.
    row_counts = self.count_rows_all()
    empty = [name for name, num_rows in row_counts.items() if num_rows < 1]
    for name in empty:
        self.drop(name)
    return empty
python
{ "resource": "" }
q42096
Update.update
train
def update(self, table, columns, values, where):
    """
    Update the values of a particular row where a value is met.

    :param table: table name
    :param columns: column(s) to update
    :param values: updated values
    :param where: tuple, (where_column, where_value)
    """
    # Unpack WHERE clause tuple.
    where_col, where_val = where

    # Build the SET clause from the column list.
    cols = get_col_val_str(columns, query_type='update')

    # NOTE(review): the WHERE value is interpolated directly into the SQL
    # string -- safe only for trusted values; consider parameterizing.
    statement = "UPDATE {0} SET {1} WHERE {2}='{3}'".format(wrap(table), cols, where_col, where_val)

    self._cursor.execute(statement, values)
    self._printer('\tMySQL cols (' + str(len(values)) + ') successfully UPDATED')
python
{ "resource": "" }
q42097
Update.update_many
train
def update_many(self, table, columns, values, where_col, where_index):
    """
    Update the values of several rows.

    :param table: Name of the MySQL table
    :param columns: List of columns
    :param values: 2D list of rows
    :param where_col: Column name for where clause
    :param where_index: Row index of value to be used for where comparison
    """
    for row in values:
        # BUG FIX: copy the row before removing the WHERE value so the
        # caller's ``values`` list is not mutated as a side effect.
        row = list(row)
        where_val = row.pop(where_index)
        self.update(table, columns, row, (where_col, where_val))
python
{ "resource": "" }
q42098
GroupsAPI.list_groups_available_in_context_accounts
train
def list_groups_available_in_context_accounts(self, account_id, include=None, only_own_groups=None):
    """
    List the groups available in a context.

    Returns the list of active groups in the given context that are visible
    to user.
    """
    # REQUIRED - PATH - account_id
    path = {"account_id": account_id}
    data = {}
    params = {}

    # OPTIONAL - only_own_groups: restrict to groups the user belongs to.
    if only_own_groups is not None:
        params["only_own_groups"] = only_own_groups
    # OPTIONAL - include: "tabs" is the only accepted value.
    if include is not None:
        self._validate_enum(include, ["tabs"])
        params["include"] = include

    self.logger.debug("GET /api/v1/accounts/{account_id}/groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/groups".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42099
GroupsAPI.invite_others_to_group
train
def invite_others_to_group(self, group_id, invitees):
    """
    Invite others to a group.

    Sends an invitation to all supplied email addresses which will allow the
    receivers to join the group.
    """
    # REQUIRED - PATH - group_id
    path = {"group_id": group_id}
    params = {}
    # REQUIRED - invitees: array of email addresses to be sent invitations.
    data = {"invitees": invitees}

    self.logger.debug("POST /api/v1/groups/{group_id}/invite with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/groups/{group_id}/invite".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }