_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q41800
get_sesames
train
def get_sesames(email, password, device_ids=None, nicknames=None, timeout=5):
    """Return list of available Sesame objects.

    Logs into the CandyHouse account and returns one :class:`Sesame` per
    device, optionally filtered by ``device_ids`` and/or ``nicknames``
    (``None`` means "accept all").
    """
    account = CandyHouseAccount(email, password, timeout=timeout)

    def _wanted(info):
        # A device passes when every supplied filter contains it.
        if device_ids is not None and info['device_id'] not in device_ids:
            return False
        if nicknames is not None and info['nickname'] not in nicknames:
            return False
        return True

    return [Sesame(account, info) for info in account.sesames if _wanted(info)]
python
{ "resource": "" }
q41801
RotaryEncoder.pulse
train
def pulse(self):
    """Invoke the ``when_rotated`` callback when a rotation step is detected."""
    b_now = self.gpio_b.is_active
    a_now = self.gpio_a.is_active
    # Decode rotation direction from the (new, old) quadrature state pair.
    delta = self.table_values.value(b_now, a_now,
                                    self.old_b_value, self.old_a_value)
    self.old_b_value, self.old_a_value = b_now, a_now
    if delta:
        self.when_rotated(delta)
python
{ "resource": "" }
q41802
DataAPI.default_versions
train
def default_versions(self, default_versions):
    '''
    Set archive default read versions

    Parameters
    ----------
    default_versions: dict
        Dictionary of archive_name, version pairs. On read/download,
        archives in this dictionary will download the specified version
        by default. Before assignment, archive_names are checked and
        normalized.
    '''
    normalized = {}
    for arch, version in default_versions.items():
        # _normalize_archive_name returns (authority, archive_name);
        # only the normalized archive name is used as the key.
        normalized[self._normalize_archive_name(arch)[1]] = version
    self._default_versions = normalized
python
{ "resource": "" }
q41803
DataAPI.create
train
def create(
        self, archive_name, authority_name=None, versioned=True,
        raise_on_err=True, metadata=None, tags=None, helper=False):
    '''
    Create a DataFS archive

    Parameters
    ----------
    archive_name: str
        Name of the archive
    authority_name: str
        Name of the data service to use as the archive's data authority
    versioned: bool
        If true, store all versions with explicit version numbers (default)
    raise_on_err: bool
        Raise an error if the archive already exists (default True)
    metadata: dict
        Dictionary of additional archive metadata
    tags: list
        Tags to attach to the archive (optional)
    helper: bool
        If true, interactively prompt for required metadata (default False)
    '''
    # Normalizing may also resolve the authority from the archive name.
    authority_name, archive_name = self._normalize_archive_name(
        archive_name, authority_name=authority_name)

    if authority_name is None:
        authority_name = self.default_authority_name

    self._validate_archive_name(archive_name)

    if metadata is None:
        metadata = {}

    res = self.manager.create_archive(
        archive_name,
        authority_name,
        archive_path=archive_name,
        versioned=versioned,
        raise_on_err=raise_on_err,
        metadata=metadata,
        user_config=self.user_config,
        tags=tags,
        helper=helper)

    return self._ArchiveConstructor(
        api=self,
        **res)
python
{ "resource": "" }
q41804
DataAPI.get_archive
train
def get_archive(self, archive_name, default_version=None):
    '''
    Retrieve a data archive

    Parameters
    ----------
    archive_name: str
        Name of the archive to retrieve
    default_version: version
        str or :py:class:`~distutils.StrictVersion` giving the default
        version number to be used on read operations

    Returns
    -------
    archive: object
        New :py:class:`~datafs.core.data_archive.DataArchive` object

    Raises
    ------
    KeyError:
        A KeyError is raised when the ``archive_name`` is not found
    ValueError:
        If an authority was given and does not match the archive's
        registered authority
    '''
    auth, archive_name = self._normalize_archive_name(archive_name)
    res = self.manager.get_archive(archive_name)

    if default_version is None:
        # Fall back to any API-level default registered for this archive.
        default_version = self._default_versions.get(archive_name, None)

    # Guard against requesting the archive from the wrong authority.
    if (auth is not None) and (auth != res['authority_name']):
        raise ValueError(
            'Archive "{}" not found on {}.'.format(archive_name, auth) +
            ' Did you mean "{}://{}"?'.format(
                res['authority_name'], archive_name))

    return self._ArchiveConstructor(
        api=self,
        default_version=default_version,
        **res)
python
{ "resource": "" }
q41805
DataAPI.search
train
def search(self, *query, **kwargs):
    '''
    Searches based on tags specified by users

    Parameters
    ----------
    query: str
        tags to search on. If multiple terms, provided in comma
        delimited string format
    prefix: str
        start of archive name. Providing a start string improves
        search speed.
    '''
    raw_prefix = kwargs.get('prefix')
    # Normalize the prefix to a relative path before handing it off.
    normalized = None if raw_prefix is None else fs.path.relpath(raw_prefix)
    return self.manager.search(query, begins_with=normalized)
python
{ "resource": "" }
q41806
DataAPI._validate_archive_name
train
def _validate_archive_name(self, archive_name):
    '''
    Utility function for validating archive names

    The name is normalized with ``fs.path.normpath`` and then checked
    against every pattern required by the manager.

    Parameters
    ----------
    archive_name: str
        Name of the archive from which to create a service path

    Raises
    ------
    ValueError:
        If the normalized name fails any required archive pattern
    '''
    archive_name = fs.path.normpath(archive_name)
    patterns = self.manager.required_archive_patterns

    for pattern in patterns:
        # re.search: the pattern may match anywhere in the name.
        if not re.search(pattern, archive_name):
            raise ValueError(
                "archive name does not match pattern '{}'".format(pattern))
python
{ "resource": "" }
q41807
DataAPI.hash_file
train
def hash_file(f):
    '''
    Utility function for hashing file contents

    Overload this function to change the file equality checking algorithm

    Parameters
    ----------
    f: file-like
        File-like object or file path from which to compute checksum value

    Returns
    -------
    checksum: dict
        dictionary with {'algorithm': 'md5', 'checksum': hexdigest}
    '''
    md5 = hashlib.md5()

    with open_filelike(f, 'rb') as f_obj:
        # Read in md5-block-sized chunks to keep memory usage bounded
        # for large files.
        for chunk in iter(lambda: f_obj.read(128 * md5.block_size), b''):
            md5.update(chunk)

    return {'algorithm': 'md5', 'checksum': md5.hexdigest()}
python
{ "resource": "" }
q41808
html_list
train
def html_list(data):
    """Convert an iterable into a formatted HTML unordered list.

    Returns ``None`` when ``data`` is ``None``.
    """
    if data is None:
        return None
    items = ''.join("<li>%s</li>" % entry for entry in data)
    return mark_safe("<ul>%s</ul>" % items)
python
{ "resource": "" }
q41809
check_pypi
train
def check_pypi(modeladmin, request, queryset):
    """Update latest package info from PyPI."""
    for package in queryset:
        if package.is_editable:
            # Editable (development) installs are skipped.
            logger.debug("Ignoring version update '%s' is editable",
                         package.package_name)
        else:
            package.update_from_pypi()
python
{ "resource": "" }
q41810
PackageVersionAdmin._updateable
train
def _updateable(self, obj): """Return True if there are available updates.""" if obj.latest_version is None or obj.is_editable: return None else: return obj.latest_version != obj.current_version
python
{ "resource": "" }
q41811
PackageVersionAdmin.available_updates
train
def available_updates(self, obj):
    """Print out all versions ahead of the current one."""
    # Local import — presumably avoids an import cycle at module load
    # time; confirm against the package layout.
    from package_monitor import pypi
    package = pypi.Package(obj.package_name)
    versions = package.all_versions()
    # Render only versions strictly greater than the installed one.
    return html_list([v for v in versions if v > obj.current_version])
python
{ "resource": "" }
q41812
get_agent
train
def get_agent(msg):
    """
    Handy hack to handle legacy messages where 'agent' was a list.
    """
    agent = msg['msg']['agent']
    return agent[0] if isinstance(agent, list) else agent
python
{ "resource": "" }
q41813
run_suite
train
def run_suite(case, config, summary):
    """ Run the full suite of verification tests

    Walks all sub-cases of ``case`` under both the model and benchmark
    output directories, analyzes each, and writes the HTML/JSON pages
    for the case. ``summary`` is updated in place.
    """
    config["name"] = case
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    tabs = []
    case_summary = LIVVDict()
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)

    for subcase in sorted(six.iterkeys(model_cases)):
        # A missing benchmark sub-case yields empty comparison paths below.
        bench_subcases = bench_cases[subcase] if subcase in bench_cases else []
        case_sections = []
        for mcase in sorted(model_cases[subcase],
                            key=functions.sort_processor_counts):
            # Case names encode the on-disk sub-path with '-' separators.
            bpath = (os.path.join(bench_dir, subcase,
                                  mcase.replace("-", os.path.sep))
                     if mcase in bench_subcases else "")
            mpath = os.path.join(model_dir, subcase,
                                 mcase.replace("-", os.path.sep))
            case_result = _analyze_case(mpath, bpath, config)
            case_sections.append(elements.section(mcase, case_result))
            case_summary[subcase] = _summarize_result(case_result,
                                                      case_summary[subcase])
        tabs.append(elements.tab(subcase, section_list=case_sections))

    result = elements.page(case, config["description"], tab_list=tabs)
    summary[case] = case_summary
    _print_summary(case, summary[case])
    functions.create_page_from_template("verification.html",
                                        os.path.join(livvkit.index_dir,
                                                     "verification",
                                                     case + ".html")
                                        )
    functions.write_json(result,
                         os.path.join(livvkit.output_dir, "verification"),
                         case + ".json")
python
{ "resource": "" }
q41814
_analyze_case
train
def _analyze_case(model_dir, bench_dir, config):
    """ Runs all of the verification checks on a particular case

    Returns a list of page elements: the bit-for-bit comparison, the
    configuration diff, and the parsed model run log.
    """
    bundle = livvkit.verification_model_module
    model_out = functions.find_file(model_dir, "*" + config["output_ext"])
    bench_out = functions.find_file(bench_dir, "*" + config["output_ext"])
    model_config = functions.find_file(model_dir, "*" + config["config_ext"])
    bench_config = functions.find_file(bench_dir, "*" + config["config_ext"])
    model_log = functions.find_file(model_dir, "*" + config["logfile_ext"])
    el = [
        bit_for_bit(model_out, bench_out, config),
        diff_configurations(model_config, bench_config, bundle, bundle),
        bundle.parse_log(model_log)
    ]
    return el
python
{ "resource": "" }
q41815
bit_for_bit
train
def bit_for_bit(model_path, bench_path, config):
    """ Checks whether the given files have bit for bit solution matches
    on the given variable list.

    Args:
        model_path: absolute path to the model dataset
        bench_path: absolute path to the benchmark dataset
        config: the configuration of the set of analyses

    Returns:
        A dictionary created by the elements object corresponding to
        the results of the bit for bit testing
    """
    fname = model_path.split(os.path.sep)[-1]
    # Error handling
    if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):
        return elements.error("Bit for Bit",
                              "File named " + fname + " has no suitable match!")
    try:
        model_data = Dataset(model_path)
        bench_data = Dataset(bench_path)
    except (FileNotFoundError, PermissionError):
        return elements.error("Bit for Bit",
                              "File named " + fname + " could not be read!")
    if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)):
        # NOTE(review): the message says "could not be read" but the actual
        # failure is a missing time dimension — confirm intent.
        return elements.error("Bit for Bit",
                              "File named " + fname + " could not be read!")

    # Begin bit for bit analysis
    headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"]
    stats = LIVVDict()
    for i, var in enumerate(config["bit_for_bit_vars"]):
        if var in model_data.variables and var in bench_data.variables:
            m_vardata = model_data.variables[var][:]
            b_vardata = bench_data.variables[var][:]
            diff_data = m_vardata - b_vardata
            if diff_data.any():
                # Non-zero differences: record error statistics and a plot.
                stats[var]["Max Error"] = np.amax(np.absolute(diff_data))
                stats[var]["Index of Max Error"] = str(
                    np.unravel_index(np.absolute(diff_data).argmax(),
                                     diff_data.shape))
                stats[var]["RMS Error"] = np.sqrt(
                    np.sum(np.square(diff_data).flatten()) / diff_data.size)
                pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata,
                                      diff_data)
            else:
                # Identical data: zero errors, no plot needed.
                stats[var]["Max Error"] = stats[var]["RMS Error"] = 0
                pf = stats[var]["Index of Max Error"] = "N/A"
            stats[var]["Plot"] = pf
        else:
            stats[var] = {"Max Error": "No Match", "RMS Error": "N/A",
                          "Plot": "N/A"}
    model_data.close()
    bench_data.close()
    return elements.bit_for_bit("Bit for Bit", headers, stats)
python
{ "resource": "" }
q41816
plot_bit_for_bit
train
def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data):
    """ Create a bit for bit plot

    Produces a 3-panel (model / benchmark / difference) PNG and returns
    the image path relative to the verification output directory, or an
    error string when the datasets' dimensions differ.
    """
    plot_title = ""
    plot_name = case + "_" + var_name + ".png"
    plot_path = os.path.join(os.path.join(livvkit.output_dir,
                                          "verification", "imgs"))
    functions.mkdir_p(plot_path)

    m_ndim = np.ndim(model_data)
    b_ndim = np.ndim(bench_data)
    if m_ndim != b_ndim:
        return "Dataset dimensions didn't match!"
    if m_ndim == 3:
        # Show only the last time slice.
        model_data = model_data[-1]
        bench_data = bench_data[-1]
        diff_data = diff_data[-1]
        plot_title = "Showing " + var_name + "[-1,:,:]"
    elif m_ndim == 4:
        # Show the last time slice at the first level.
        model_data = model_data[-1][0]
        bench_data = bench_data[-1][0]
        diff_data = diff_data[-1][0]
        plot_title = "Showing " + var_name + "[-1,0,:,:]"

    plt.figure(figsize=(12, 3), dpi=80)
    plt.clf()

    # Calculate min and max to scale the colorbars
    _max = np.amax([np.amax(model_data), np.amax(bench_data)])
    _min = np.amin([np.amin(model_data), np.amin(bench_data)])

    # Plot the model output
    plt.subplot(1, 3, 1)
    plt.xlabel("Model Data")
    plt.ylabel(var_name)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(model_data, vmin=_min, vmax=_max, interpolation='nearest',
               cmap=colormaps.viridis)
    plt.colorbar()

    # Plot the benchmark data
    plt.subplot(1, 3, 2)
    plt.xlabel("Benchmark Data")
    plt.xticks([])
    plt.yticks([])
    plt.imshow(bench_data, vmin=_min, vmax=_max, interpolation='nearest',
               cmap=colormaps.viridis)
    plt.colorbar()

    # Plot the difference
    plt.subplot(1, 3, 3)
    plt.xlabel("Difference")
    plt.xticks([])
    plt.yticks([])
    plt.imshow(diff_data, interpolation='nearest', cmap=colormaps.viridis)
    plt.colorbar()

    plt.tight_layout(rect=(0, 0, 0.95, 0.9))
    plt.suptitle(plot_title)

    plot_file = os.path.sep.join([plot_path, plot_name])
    if livvkit.publish:
        # Also emit a publication-quality EPS alongside the PNG.
        plt.savefig(os.path.splitext(plot_file)[0] + '.eps', dpi=600)
    plt.savefig(plot_file)
    plt.close()
    # Return the path relative to the verification output dir so the web
    # page can link the image.
    return os.path.join(os.path.relpath(plot_path,
                                        os.path.join(livvkit.output_dir,
                                                     "verification")),
                        plot_name)
python
{ "resource": "" }
q41817
Campaign.save
train
def save(self, *args, **kwargs):
    """Kicks off celery task to re-save associated special coverages to percolator

    :param args: inline arguments (optional)
    :param kwargs: keyword arguments
    :return: `bulbs.campaigns.Campaign`
    """
    campaign = super(Campaign, self).save(*args, **kwargs)
    # NOTE(review): Django's Model.save() conventionally returns None, so
    # `campaign` is likely None here — confirm the documented return value.
    # Fire-and-forget: the percolator refresh runs asynchronously in celery.
    save_campaign_special_coverage_percolator.delay(self.tunic_campaign_id)
    return campaign
python
{ "resource": "" }
q41818
Count.count_rows_duplicates
train
def count_rows_duplicates(self, table, cols='*'):
    """Get the number of rows that do not contain distinct values."""
    total = self.count_rows(table, '*')
    distinct = self.count_rows_distinct(table, cols)
    return total - distinct
python
{ "resource": "" }
q41819
Count.count_rows
train
def count_rows(self, table, cols='*'):
    """Get the number of rows in a particular table."""
    query = 'SELECT COUNT({0}) FROM {1}'.format(join_cols(cols), wrap(table))
    result = self.fetch(query)
    # fetch() may yield None on an empty result set; report that as 0.
    return 0 if result is None else result
python
{ "resource": "" }
q41820
Count.count_rows_distinct
train
def count_rows_distinct(self, table, cols='*'):
    """Get the number distinct of rows in a particular table."""
    query = 'SELECT COUNT(DISTINCT {0}) FROM {1}'.format(join_cols(cols),
                                                         wrap(table))
    return self.fetch(query)
python
{ "resource": "" }
q41821
Structure.get_unique_column
train
def get_unique_column(self, table):
    """Determine if any of the columns in a table contain exclusively unique values.

    Returns the first such column, or ``None`` when every column has
    duplicate values.
    """
    for column in self.get_columns(table):
        # Zero duplicate rows means the column holds only unique values.
        if self.count_rows_duplicates(table, column) == 0:
            return column
python
{ "resource": "" }
q41822
Structure.get_duplicate_vals
train
def get_duplicate_vals(self, table, column):
    """Retrieve duplicate values in a column of a table."""
    query = ('SELECT {0} FROM {1} GROUP BY {0} '
             'HAVING COUNT(*) > 1').format(join_cols(column), wrap(table))
    return self.fetch(query)
python
{ "resource": "" }
q41823
BaseSimpleMetadata.get_field_info
train
def get_field_info(self, field):
    """
    This method is basically a mirror from rest_framework==3.3.3

    We are currently pinned to rest_framework==3.1.1. If we upgrade, this
    can be refactored and simplified to rely more heavily on
    rest_framework's built in logic.

    Returns a dict describing the field: its attributes, required flag,
    type label, nested child/children info, and (non-relational) choices.
    """
    field_info = self.get_attributes(field)
    field_info["required"] = getattr(field, "required", False)
    field_info["type"] = self.get_label_lookup(field)

    if getattr(field, "child", None):
        # List-style field: describe the repeated element type.
        field_info["child"] = self.get_field_info(field.child)
    elif getattr(field, "fields", None):
        # Nested serializer: describe each sub-field recursively.
        field_info["children"] = self.get_serializer_info(field)

    # Skip "choices" for relational fields, mirroring DRF's metadata logic.
    if (not isinstance(field, (serializers.RelatedField,
                               serializers.ManyRelatedField))
            and hasattr(field, "choices")):
        field_info["choices"] = [
            {
                "value": choice_value,
                "display_name": force_text(choice_name, strings_only=True)
            }
            for choice_value, choice_name in field.choices.items()
        ]
    return field_info
python
{ "resource": "" }
q41824
StringBuffer._double_prefix
train
def _double_prefix(self): """Grow the given deque by doubling, but don't split the second chunk just because the first one is small. """ new_len = max(len(self._buf[0]) * 2, (len(self._buf[0]) + len(self._buf[1]))) self._merge_prefix(new_len)
python
{ "resource": "" }
q41825
StringBuffer._merge_prefix
train
def _merge_prefix(self, size): """Replace the first entries in a deque of strings with a single string of up to size bytes. >>> d = collections.deque(['abc', 'de', 'fghi', 'j']) >>> _merge_prefix(d, 5); print(d) deque(['abcde', 'fghi', 'j']) Strings will be split as necessary to reach the desired size. >>> _merge_prefix(d, 7); print(d) deque(['abcdefg', 'hi', 'j']) >>> _merge_prefix(d, 3); print(d) deque(['abc', 'defg', 'hi', 'j']) >>> _merge_prefix(d, 100); print(d) deque(['abcdefghij']) """ if len(self._buf) == 1 and len(self._buf[0]) <= size: return prefix = [] remaining = size while self._buf and remaining > 0: chunk = self._buf.popleft() if len(chunk) > remaining: self._buf.appendleft(chunk[remaining:]) chunk = chunk[:remaining] prefix.append(chunk) remaining -= len(chunk) if prefix: self._buf.appendleft(b''.join(prefix)) if not self._buf: self._buf.appendleft(b'')
python
{ "resource": "" }
q41826
get_sponsored_special_coverage_query
train
def get_sponsored_special_coverage_query(only_recent=False):
    """
    Reference to all SpecialCoverage queries.

    :param only_recent: references RECENT_SPONSORED_OFFSET_HOURS from
        django settings. Used to return sponsored content within a given
        configuration of hours.
    :returns: Djes.LazySearch query matching all active special coverages.
    """
    special_coverages = get_sponsored_special_coverages()
    es_query = SearchParty(special_coverages).search()
    if only_recent:
        # Restrict to content published within the configured window;
        # a missing setting gives a 0-hour window.
        offset = getattr(settings, "RECENT_SPONSORED_OFFSET_HOURS", 0)
        es_query = es_query.filter(
            Published(after=timezone.now() - timezone.timedelta(hours=offset))
        )
    return es_query
python
{ "resource": "" }
q41827
DebuggedApplication.debug_application
train
def debug_application(self, environ, start_response):
    """Run the application and conserve the traceback frames.

    WSGI generator: on success it streams the wrapped app's response; on
    an unhandled exception it captures the traceback (keeping its frames
    for the interactive debugger) and renders the debugger page instead.
    """
    app_iter = None
    try:
        app_iter = self.app(environ, start_response)
        for item in app_iter:
            yield item
        if hasattr(app_iter, 'close'):
            app_iter.close()
    except Exception:
        # Finalize the wrapped iterable before producing debug output.
        if hasattr(app_iter, 'close'):
            app_iter.close()

        context = RequestContext({'environ': dict(environ)})
        for injector in self.context_injectors:
            context.update(injector(environ))

        traceback = get_current_traceback(
            skip=1,
            show_hidden_frames=self.show_hidden_frames,
            context=context)
        # Keep frames/tracebacks addressable for the interactive console.
        for frame in traceback.frames:
            self.frames[frame.id] = frame
        self.tracebacks[traceback.id] = traceback

        try:
            start_response('500 INTERNAL SERVER ERROR', [
                ('Content-Type', 'text/html; charset=utf-8'),
                # Disable Chrome's XSS protection, the debug
                # output can cause false-positives.
                ('X-XSS-Protection', '0'),
            ])
        except Exception:
            # if we end up here there has been output but an error
            # occurred.  in that situation we can do nothing fancy any
            # more, better log something into the error log and fall
            # back gracefully.
            environ['wsgi.errors'].write(
                'Debugging middleware caught exception in streamed '
                'response at a point where response headers were already '
                'sent.\n')
        else:
            yield traceback.render_full(
                evalex=self.evalex,
                secret=self.secret
            ).encode('utf-8', 'replace')

        # This will lead to double logging in case backlash logger is set
        # to DEBUG but this is actually wanted as some environments, like
        # WebTest, swallow wsgi.environ making the traceback totally
        # disappear.
        log.debug(traceback.plaintext)
        traceback.log(environ['wsgi.errors'])
python
{ "resource": "" }
q41828
DebuggedApplication.paste_traceback
train
def paste_traceback(self, request, traceback):
    """Paste the traceback and return a JSON response."""
    paste_info = traceback.paste()
    return Response(json.dumps(paste_info),
                    content_type='application/json')
python
{ "resource": "" }
q41829
ESPublishedFilterBackend.filter_queryset
train
def filter_queryset(self, request, queryset, view):
    """Apply the relevant behaviors to the view queryset."""
    begins = self.get_start(request)
    if begins:
        queryset = self.apply_published_filter(queryset, "after", begins)
    ends = self.get_end(request)
    if ends:
        # The end value is forced to cover the whole day provided in the
        # query, since the es Published filter only supports gte & lte.
        queryset = self.apply_published_filter(queryset, "before", ends)
    return queryset
python
{ "resource": "" }
q41830
ESPublishedFilterBackend.apply_published_filter
train
def apply_published_filter(self, queryset, operation, value):
    """
    Add the appropriate Published filter to a given elasticsearch query.

    :param queryset: The DJES queryset object to be filtered.
    :param operation: The type of filter (before/after).
    :param value: The date or datetime value being applied to the filter.
    :raises ValueError: if ``operation`` is not "after" or "before".
    """
    if operation not in ("after", "before"):
        raise ValueError("""Publish filters only use before or after for range filters.""")
    return queryset.filter(Published(**{operation: value}))
python
{ "resource": "" }
q41831
ESPublishedFilterBackend.get_date_datetime_param
train
def get_date_datetime_param(self, request, param):
    """Check the request for the provided query parameter and returns a rounded value.

    :param request: WSGI request object to retrieve query parameter data.
    :param param: the name of the query parameter.
    :returns: a datetime at midnight of the parsed date, or None when the
        parameter is absent or unparseable.
    """
    if param in request.GET:
        param_value = request.GET.get(param, None)
        # Match and interpret param if formatted as a date.
        date_match = dateparse.date_re.match(param_value)
        if date_match:
            return timezone.datetime.combine(
                dateparse.parse_date(date_match.group(0)),
                timezone.datetime.min.time()
            )
        # NOTE(review): datetime values are truncated to midnight of their
        # date — the time-of-day portion is discarded; confirm intent.
        datetime_match = dateparse.datetime_re.match(param_value)
        if datetime_match:
            return timezone.datetime.combine(
                dateparse.parse_datetime(datetime_match.group(0)).date(),
                timezone.datetime.min.time()
            )
    return None
python
{ "resource": "" }
q41832
Session.create
train
def create(self, message, mid=None, age=60, force=True):
    """
    Create a session for ``message`` and register it in the session list.

    force
        if you pass `force = False`, it may raise SessionError due to a
        duplicate message id; with ``force=True`` the message is re-minted
        with a freshly generated id instead.
    """
    with self.session_lock:
        # Ensure the message carries an id before it is used as the key.
        if not hasattr(message, "id"):
            message.__setattr__("id", "event-%s" % (uuid.uuid4().hex,))
        if self.session_list.get(message.id, None) is not None:
            if force is False:
                raise SessionError("Message id: %s duplicate!" % message.id)
            else:
                # Rebuild the message with a new unique id.
                message = Message(message.to_dict(), generate_id=True)

        session = {
            "status": Status.CREATED,
            "message": message,
            "age": age,
            "mid": mid,
            "created_at": time(),
            "is_published": Event(),
            "is_resolved": Event()
        }
        self.session_list.update({
            message.id: session
        })
        return session
python
{ "resource": "" }
q41833
parse_args
train
def parse_args(args=None):
    """
    Handles the parsing of options for LIVVkit's command line interface

    Args:
        args: The list of arguments, typically sys.argv[1:]

    Returns:
        The result of ``init(...)`` applied to the parsed namespace.
    """
    parser = argparse.ArgumentParser(description="Main script to run LIVVkit.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     fromfile_prefix_chars='@')

    parser.add_argument('-o', '--out-dir',
                        default=os.path.join(os.getcwd(),
                                             "vv_" + time.strftime("%Y-%m-%d")),
                        help='Location to output the LIVVkit webpages.'
                        )

    parser.add_argument('-v', '--verify',
                        nargs=2,
                        default=None,
                        help=' '.join(['Specify the locations of the test and bench bundle to',
                                       'compare (respectively).'
                                       ])
                        )

    parser.add_argument('-V', '--validate',
                        action='store',
                        nargs='+',
                        default=None,
                        help=' '.join(['Specify the location of the configuration files for',
                                       'validation tests.'
                                       ])
                        )

    # FIXME: this just short-circuits to the validation option, and should
    # become its own module
    parser.add_argument('-e', '--extension',
                        action='store',
                        nargs='+',
                        default=None,
                        dest='validate',
                        metavar='EXTENSION',
                        help=' '.join(['Specify the location of the configuration files for',
                                       'LIVVkit extensions.'
                                       ])
                        )

    parser.add_argument('-p', '--publish',
                        action='store_true',
                        help=' '.join(['Also produce a publication quality copy of the figure in',
                                       'the output directory (eps, 600d pi).'
                                       ])
                        )

    parser.add_argument('-s', '--serve',
                        nargs='?',
                        type=int,
                        const=8000,
                        help=' '.join(['Start a simple HTTP server for the output website specified',
                                       'by OUT_DIR on port SERVE.'
                                       ])
                        )

    parser.add_argument('--version',
                        action='version',
                        version='LIVVkit {}'.format(livvkit.__version__),
                        help="Show LIVVkit's version number and exit"
                        )

    # Post-process the parsed namespace into livvkit's global configuration.
    return init(parser.parse_args(args))
python
{ "resource": "" }
q41834
GroupCategoriesAPI.get_single_group_category
train
def get_single_group_category(self, group_category_id):
    """
    Get a single group category.

    Returns the data for a single group category, or a 401 if the caller
    doesn't have the rights to see it.
    """
    path = {"group_category_id": group_category_id}
    data = {}
    params = {}

    self.logger.debug("GET /api/v1/group_categories/{group_category_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41835
GroupCategoriesAPI.create_group_category_accounts
train
def create_group_category_accounts(self, name, account_id, auto_leader=None,
                                   create_group_count=None, group_limit=None,
                                   self_signup=None, split_group_count=None):
    """
    Create a Group Category.

    Create a new group category under the given account via
    POST /api/v1/accounts/{account_id}/group_categories.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # REQUIRED - name
    """Name of the group category"""
    data["name"] = name

    # OPTIONAL - self_signup
    """Allow students to sign up for a group themselves (Course Only). valid values are: "enabled":: allows students to self sign up for any group in course "restricted":: allows students to self sign up only for groups in the same section null disallows self sign up"""
    if self_signup is not None:
        self._validate_enum(self_signup, ["enabled", "restricted"])
        data["self_signup"] = self_signup

    # OPTIONAL - auto_leader
    """Assigns group leaders automatically when generating and allocating students to groups Valid values are: "first":: the first student to be allocated to a group is the leader "random":: a random student from all members is chosen as the leader"""
    if auto_leader is not None:
        self._validate_enum(auto_leader, ["first", "random"])
        data["auto_leader"] = auto_leader

    # OPTIONAL - group_limit
    """Limit the maximum number of users in each group (Course Only). Requires self signup."""
    if group_limit is not None:
        data["group_limit"] = group_limit

    # OPTIONAL - create_group_count
    """Create this number of groups (Course Only)."""
    if create_group_count is not None:
        data["create_group_count"] = create_group_count

    # OPTIONAL - split_group_count
    """(Deprecated) Create this number of groups, and evenly distribute students among them. not allowed with "enable_self_signup". because the group assignment happens synchronously, it's recommended that you instead use the assign_unassigned_members endpoint. 
 (Course Only)"""
    if split_group_count is not None:
        data["split_group_count"] = split_group_count

    self.logger.debug("POST /api/v1/accounts/{account_id}/group_categories with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/group_categories".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41836
GroupCategoriesAPI.list_groups_in_group_category
train
def list_groups_in_group_category(self, group_category_id):
    """
    List groups in group category.

    Returns a list of groups in a group category.
    """
    path = {"group_category_id": group_category_id}
    data = {}
    params = {}

    self.logger.debug("GET /api/v1/group_categories/{group_category_id}/groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}/groups".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q41837
GroupCategoriesAPI.list_users_in_group_category
train
def list_users_in_group_category(self, group_category_id, search_term=None, unassigned=None):
    """
    List users in group category.

    Returns a list of users in the group category.
    """
    path = {"group_category_id": group_category_id}
    data = {}
    params = {}

    # The partial name or full ID of the users to match; the API requires
    # at least 3 characters when provided.
    if search_term is not None:
        params["search_term"] = search_term

    # When true, only unassigned users in the category are searched.
    if unassigned is not None:
        params["unassigned"] = unassigned

    self.logger.debug("GET /api/v1/group_categories/{group_category_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}/users".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q41838
GroupCategoriesAPI.assign_unassigned_members
train
def assign_unassigned_members(self, group_category_id, sync=None):
    """
    Assign unassigned members.

    Assign all unassigned members as evenly as possible among the
    existing student groups.
    """
    path = {"group_category_id": group_category_id}
    data = {}
    params = {}

    # Assignment runs asynchronously by default; pass sync=True to force
    # synchronous assignment.
    if sync is not None:
        data["sync"] = sync

    self.logger.debug("POST /api/v1/group_categories/{group_category_id}/assign_unassigned_members with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/group_categories/{group_category_id}/assign_unassigned_members".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41839
Payment.set_shipping
train
def set_shipping(self, *args, **kwargs):
    '''
    Set the shipping attributes of the payment.

    Args:
        type (int): (optional) Shipping type. Valid values are: 1 for
            'Regular parcel (PAC)', 2 for 'SEDEX' and 3 for
            'Unspecified shipping type'.
        cost (float): (optional) Total shipping cost. Must be greater
            than 0.00 and less than or equal to 9999999.00.
        street (str): (optional) Street name of the shipping address.
        address_number: (optional) Number of the shipping address.
        complement: (optional) Complement (block, apartment, etc.) of
            the shipping address.
        district: (optional) District of the shipping address.
        postal_code: (optional) Postal code (CEP) of the shipping
            address.
        city: (optional) City of the shipping address.
        state: (optional) State of the shipping address.
        country: (optional) Country of the shipping address. Only the
            value 'BRA' is accepted.
    '''
    # Bug fix: the original iterated kwargs.iteritems(), which does not
    # exist on Python 3 (AttributeError). dict(kwargs) copies the keyword
    # arguments and works on both Python 2 and 3.
    self.shipping = dict(kwargs)
    # Validate the assembled payload against the shipping schema.
    shipping_schema(self.shipping)
python
{ "resource": "" }
q41840
opensearch
train
def opensearch(request):
    """
    Return opensearch.xml.
    """
    # NOTE: every local variable below is deliberately exported to the
    # template context via ``locals()`` — renaming any of them changes
    # what the template sees.
    contact_email = settings.CONTACT_EMAIL
    short_name = settings.SHORT_NAME
    description = settings.DESCRIPTION
    favicon_width = settings.FAVICON_WIDTH
    favicon_height = settings.FAVICON_HEIGHT
    favicon_type = settings.FAVICON_TYPE
    favicon_file = settings.FAVICON_FILE
    # The literal "{searchTerms}" placeholder must survive .format(), hence
    # the doubled braces.
    url = "{url}?{querystring}{{searchTerms}}".format(**{
        "url": request.build_absolute_uri(reverse(settings.SEARCH_URL)),
        "querystring": settings.SEARCH_QUERYSTRING,
    })
    input_encoding = settings.INPUT_ENCODING.upper()
    return render_to_response("opensearch/opensearch.xml",
                              context=locals(),
                              content_type="application/opensearchdescription+xml")
python
{ "resource": "" }
q41841
ModelInitiator.db_manager
train
def db_manager(self):
    """
    " Do series of DB operations.

    Recovery chain: create the db if missing, then try to load it; on
    failure fall back to the backup copy, and finally to the factory copy.
    ``self.db_status`` records which source ended up being loaded:
    "factory", "existing" or "backup".

    Returns:
        bool: always True.

    NOTE(review): the trailing ``finally: return True`` swallows any
    exception raised while loading the backup/factory db -- presumably
    intentional ("always succeed"), but confirm before relying on errors
    propagating out of this method.
    """
    rc_create = self.create_db() # for first create
    try:
        self.load_db() # load existing/factory
    except Exception as e:
        _logger.debug("*** %s" % str(e))
        # First load failed: try restoring from the backup json file.
        try:
            self.recover_db(self.backup_json_db_path)
        except Exception:
            # Best effort -- fall through to the backup-load attempt below.
            pass
    else:
        # First load succeeded: db came either from a fresh factory copy
        # (create_db() just created it) or from an existing file.
        if rc_create is True:
            self.db_status = "factory"
        else:
            self.db_status = "existing"
        return True
    try:
        self.load_db() # load backup
    except Exception as b:
        _logger.debug("*** %s" % str(b))
        # Backup unusable as well: restore and load the factory defaults.
        self.recover_db(self.factory_json_db_path)
        self.load_db() # load factory
        self.db_status = "factory"
    else:
        self.db_status = "backup"
    finally:
        return True
python
{ "resource": "" }
q41842
ModelInitiator.create_db
train
def create_db(self):
    """
    " Create a db file for model if there is no db.
    " User need to prepare thier own xxx.json.factory.

    Returns False when the db file already exists, True after copying the
    factory file into place. Raises RuntimeError for non-json db types or
    when no *.json.factory file is available.
    """
    if self.db_type != "json":
        raise RuntimeError("db_type only supports json now")
    # Nothing to do when the live db file is already present.
    if os.path.exists(self.json_db_path):
        return False
    if not os.path.exists(self.factory_json_db_path):
        _logger.debug( "*** NO such file: %s" % self.factory_json_db_path)
        raise RuntimeError("No *.json.factory file")
    # Seed the live db from the factory defaults under the db lock.
    with self.db_mutex:
        shutil.copy2(self.factory_json_db_path, self.json_db_path)
    return True
python
{ "resource": "" }
q41843
ModelInitiator.backup_db
train
def backup_db(self):
    """
    " Generate a xxxxx.backup.json.

    Best-effort snapshot of the live json db; copy failures are only
    logged, never raised.
    """
    with self.db_mutex:
        if not os.path.exists(self.json_db_path):
            return
        try:
            shutil.copy2(self.json_db_path, self.backup_json_db_path)
        except (IOError, OSError):
            _logger.debug("*** No file to copy.")
python
{ "resource": "" }
q41844
ModelInitiator.load_db
train
def load_db(self):
    """
    " Load json db as a dictionary.

    Reads ``self.json_db_path`` into ``self.db``; logs then re-raises any
    open/parse failure.
    """
    try:
        with open(self.json_db_path) as handle:
            self.db = json.load(handle)
    except Exception as err:
        _logger.debug("*** Open JSON DB error.")
        raise err
python
{ "resource": "" }
q41845
ModelInitiator.save_db
train
def save_db(self):
    """
    " Save json db to file system.

    Returns False when ``self.db`` is not a dict/list, True on a
    successful write (after calling ``self.sync()``). Write errors (e.g.
    disk full) are logged and re-raised.
    """
    with self.db_mutex:
        # Only dict/list payloads are valid JSON documents here.
        if not isinstance(self.db, (dict, list)):
            return False
        try:
            with open(self.json_db_path, "w") as handle:
                json.dump(self.db, handle, indent=4)
        except Exception as err:  # disk full or something.
            _logger.debug("*** Write JSON DB to file error.")
            raise err
        self.sync()
        return True
python
{ "resource": "" }
q41846
IxnPort.wait_for_states
train
def wait_for_states(self, timeout=40, *states):
    """ Wait until port reaches one of the requested states.

    Polls the port state once per second, up to ``timeout`` polls.

    :param timeout: max time to wait for requested port states.
    :raises TgnError: if none of the states is reached in time.
    """
    state = self.get_attribute('state')
    attempts = 0
    while attempts < timeout:
        if state in states:
            return
        time.sleep(1)
        state = self.get_attribute('state')
        attempts += 1
    raise TgnError('Failed to reach states {}, port state is {} after {} seconds'.format(states, state, timeout))
python
{ "resource": "" }
q41847
FormWizardAdminView.get_form
train
def get_form(self, step=None, data=None, files=None):
    """Instantiate the form for the current step.

    FormAdminView from xadmin expects the form to be available at
    ``self.form_obj``, so cache it there before returning.
    """
    form = super(FormWizardAdminView, self).get_form(step=step, data=data, files=files)
    self.form_obj = form
    return form
python
{ "resource": "" }
q41848
FormWizardAdminView.render
train
def render(self, form=None, **kwargs):
    """Return the ``HttpResponse`` built from the wizard context data."""
    return self.render_to_response(self.get_context(**kwargs))
python
{ "resource": "" }
q41849
FormWizardAdminView.render_to_response
train
def render_to_response(self, context):
    """Prepare the forms (django-crispy helper) and draw the template.

    Returns the ``TemplateResponse`` ready to be displayed.
    """
    self.setup_forms()
    response = TemplateResponse(
        self.request,
        self.form_template,
        context,
        current_app=self.admin_site.name,
    )
    return response
python
{ "resource": "" }
q41850
FormWizardAdminView.get_context
train
def get_context(self, **kwargs):
    """Build the template context for the wizard view.

    Mixes the django wizard context data with the django-xadmin context.
    Note the super() anchor is FormAdminView (not this class), matching
    the original lookup chain.
    """
    context = self.get_context_data(form=self.form_obj, **kwargs)
    xadmin_context = super(FormAdminView, self).get_context()
    context.update(xadmin_context)
    return context
python
{ "resource": "" }
q41851
FqMerger.run
train
def run(self):
    """Run merging.

    Interleaves reads from the weighted list of open input FASTQ files
    into the output, picking a random file and copying up to
    READS_IN_GROUP * reads-in-tuple 4-line records from it before
    picking again. Files are dropped from the weighted list once closed
    or exhausted.

    NOTE(review): ``i_files_weighted`` presumably contains the same file
    handle multiple times to weight the random choice -- confirm against
    the constructor.
    """
    print("", file=sys.stderr)
    print("Going to merge/convert RNF-FASTQ files.", file=sys.stderr)
    print("", file=sys.stderr)
    print(" mode: ", self.mode, file=sys.stderr)
    print(" input files: ", ", ".join(self.input_files_fn), file=sys.stderr)
    print(" output files: ", ", ".join(self.output_files_fn), file=sys.stderr)
    print("", file=sys.stderr)
    while len(self.i_files_weighted) > 0:
        # Pick a random input file; copy a group of reads from it.
        file_id = self.rng.randint(0, len(self.i_files_weighted) - 1)
        for i in range(READS_IN_GROUP * self._reads_in_tuple):
            # A handle may have been closed via another weighted entry.
            if self.i_files_weighted[file_id].closed:
                del self.i_files_weighted[file_id]
                break
            # One FASTQ record = 4 lines (header, seq, plus, quals).
            ln1 = self.i_files_weighted[file_id].readline()
            ln2 = self.i_files_weighted[file_id].readline()
            ln3 = self.i_files_weighted[file_id].readline()
            ln4 = self.i_files_weighted[file_id].readline()
            # EOF on any of the 4 lines: retire this file entry.
            if ln1 == "" or ln2 == "" or ln3 == "" or ln4 == "":
                self.i_files_weighted[file_id].close()
                del self.i_files_weighted[file_id]
                break
            # Sanity-check the FASTQ record structure.
            assert ln1[0] == "@", ln1
            assert ln3[0] == "+", ln3
            self.output.save_read(ln1, ln2, ln3, ln4)
    self.output.close()
python
{ "resource": "" }
q41852
is_config_container
train
def is_config_container(v):
    """Return True if *v* is (a subclass instance of) list, dict or Config.

    The subclass checks are short-circuited left to right, so list and
    dict values never touch the Config symbol.
    """
    kind = type(v)
    return (
        issubclass(kind, list)
        or issubclass(kind, dict)
        or issubclass(kind, Config)
    )
python
{ "resource": "" }
q41853
Handler.validate
train
def validate(cls, cfg, path="", nested=0, parent_cfg=None): """ Validates a section of a config dict. Will automatically validate child sections as well if their attribute pointers are instantiated with a handler property """ # number of critical errors found num_crit = 0 # number of non-critical errors found num_warn = 0 # check for missing keys in the config for name in dir(cls): if nested > 0: break try: attr = getattr(cls, name) if isinstance(attr, Attribute): if attr.default is None and name not in cfg: # no default value defined, which means its required # to be set in the config file if path: attr_full_name = "%s.%s" % (path, name) else: attr_full_name = name raise vodka.exceptions.ConfigErrorMissing( attr_full_name, attr) attr.preload(cfg, name) except vodka.exceptions.ConfigErrorMissing as inst: if inst.level == "warn": vodka.log.warn(inst.explanation) num_warn += 1 elif inst.level == "critical": vodka.log.error(inst.explanation) num_crit += 1 if type(cfg) in [dict, Config]: keys = list(cfg.keys()) if nested > 0: for _k, _v in cfg.items(): _num_crit, _num_warn = cls.validate( _v, path=("%s.%s" % (path, _k)), nested=nested-1, parent_cfg=cfg ) num_crit += _num_crit num_warn += _num_warn return num_crit, num_warn elif type(cfg) == list: keys = list(range(0, len(cfg))) else: raise ValueError("Cannot validate non-iterable config value") # validate existing keys in the config for key in keys: try: _num_crit, _num_warn = cls.check(cfg, key, path) num_crit += _num_crit num_warn += _num_warn except ( vodka.exceptions.ConfigErrorUnknown, vodka.exceptions.ConfigErrorValue, vodka.exceptions.ConfigErrorType ) as inst: if inst.level == "warn": vodka.log.warn(inst.explanation) num_warn += 1 elif inst.level == "critical": vodka.log.error(inst.explanation) num_crit += 1 return num_crit, num_warn
python
{ "resource": "" }
q41854
Handler.attributes
train
def attributes(cls):
    """
    yields tuples for all attributes defined on this handler

    tuple yielded: name (str), attribute (Attribute)
    """
    for name in dir(cls):
        candidate = getattr(cls, name)
        if isinstance(candidate, Attribute):
            yield name, candidate
python
{ "resource": "" }
q41855
Config.read
train
def read(self, config_dir=None, clear=False, config_file=None):
    """
    Read configuration data.

    munge's Config.read only reads from a config directory; this override
    additionally supports reading straight from a single config file via
    ``config_file``.
    """
    if not config_file:
        # No explicit file: defer to munge's directory-based reader.
        return super(Config, self).read(config_dir=config_dir, clear=clear)
    data_file = os.path.basename(config_file)
    data_path = os.path.dirname(config_file)
    if clear:
        self.clear()
    config = munge.load_datafile(data_file, data_path, default=None)
    if not config:
        raise IOError("Config file not found: %s" % config_file)
    munge.util.recursive_update(self.data, config)
    self._meta_config_dir = data_path
    return
python
{ "resource": "" }
q41856
FavoritesAPI.remove_group_from_favorites
train
def remove_group_from_favorites(self, id):
    """
    Remove group from favorites.

    Remove a group from the current user's favorites.
    """
    path = {"id": id}
    data = {}
    params = {}
    self.logger.debug("DELETE /api/v1/users/self/favorites/groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/self/favorites/groups/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41857
FavoritesAPI.reset_course_favorites
train
def reset_course_favorites(self):
    """
    Reset course favorites.

    Reset the current user's course favorites to the default
    automatically generated list of enrolled courses
    """
    data = {}
    params = {}
    endpoint = "/api/v1/users/self/favorites/courses"
    self.logger.debug("DELETE /api/v1/users/self/favorites/courses with query params: {params} and form data: {data}".format(params=params, data=data))
    return self.generic_request("DELETE", endpoint, data=data, params=params, no_data=True)
python
{ "resource": "" }
q41858
SearchAPI.list_all_courses
train
def list_all_courses(self, open_enrollment_only=None, public_only=None, search=None):
    """
    List all courses.

    List all courses visible in the public index.

    search: terms matched against users/courses/groups; all terms must match.
    public_only: only return courses with public content (default false).
    open_enrollment_only: only return self-enrollable courses (default false).
    """
    path = {}
    data = {}
    params = {}
    # Only forward the optional query filters that were explicitly set.
    for field, value in (
        ("search", search),
        ("public_only", public_only),
        ("open_enrollment_only", open_enrollment_only),
    ):
        if value is not None:
            params[field] = value
    self.logger.debug("GET /api/v1/search/all_courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/search/all_courses".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q41859
CommunicationChannelsAPI.create_communication_channel
train
def create_communication_channel(self, user_id, communication_channel_type, communication_channel_address, communication_channel_token=None, skip_confirmation=None):
    """
    Create a communication channel.

    Creates a new communication channel for the specified user.

    communication_channel_type must be one of "email", "sms" or "push".
    The address is an email address or SMS number (not required for
    "push" channels); the token is only valid for "push" channels.
    skip_confirmation (site/account admins only) auto-validates the
    channel instead of sending a confirmation message.
    """
    path = {"user_id": user_id}
    params = {}
    # Channel type is a closed enum -- validate before building the form.
    self._validate_enum(communication_channel_type, ["email", "sms", "push"])
    data = {
        "communication_channel[address]": communication_channel_address,
        "communication_channel[type]": communication_channel_type,
    }
    # Optional form fields are only sent when explicitly provided.
    for field, value in (
        ("communication_channel[token]", communication_channel_token),
        ("skip_confirmation", skip_confirmation),
    ):
        if value is not None:
            data[field] = value
    self.logger.debug("POST /api/v1/users/{user_id}/communication_channels with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/users/{user_id}/communication_channels".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41860
CommunicationChannelsAPI.delete_communication_channel_id
train
def delete_communication_channel_id(self, id, user_id):
    """
    Delete a communication channel.

    Delete an existing communication channel.
    """
    path = {"user_id": user_id, "id": id}
    data = {}
    params = {}
    self.logger.debug("DELETE /api/v1/users/{user_id}/communication_channels/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/{user_id}/communication_channels/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41861
Literal.from_str
train
def from_str(cls, string):
    """
    Creates a literal from a string

    Parameters
    ----------
    string : str
        If the string starts with '!', it's interpreted as a negated variable

    Returns
    -------
    caspo.core.literal.Literal
        Created object instance
    """
    if string[0] == '!':
        # Leading '!' marks negation; strip it from the variable name.
        return cls(string[1:], -1)
    return cls(string, 1)
python
{ "resource": "" }
q41862
normalize_signature
train
def normalize_signature(func):
    """Decorator. Combine args and kwargs. Unpack single item tuples.

    The wrapped function receives exactly one positional argument:
    - kwargs present: ``(args_tuple, kwargs_dict)``
    - exactly one positional arg (and no kwargs): that arg, unwrapped
    - otherwise: the tuple of positional args
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs:
            args = args, kwargs
        # BUGFIX: was `len(args) is 1` -- identity comparison with an int
        # only works by accident of CPython's small-int caching; use ==.
        if len(args) == 1:
            args = args[0]
        return func(args)
    return wrapper
python
{ "resource": "" }
q41863
filter_commands
train
def filter_commands(commands, invalid_query_starts=('DROP', 'UNLOCK', 'LOCK')):
    """
    Remove particular queries from a list of SQL commands.

    :param commands: List of SQL commands
    :param invalid_query_starts: Type of SQL command to remove
    :return: Filtered list of SQL commands
    """
    original_count = len(commands)
    kept = [command for command in commands if not command.startswith(invalid_query_starts)]
    removed = original_count - len(kept)
    # Report how many commands were stripped, if any.
    if removed > 0:
        print("\t" + str(invalid_query_starts) + " commands removed", removed)
    return kept
python
{ "resource": "" }
q41864
prepare_sql
train
def prepare_sql(sql, add_semicolon=True, invalid_starts=('--', '/*', '*/', ';')):
    """Wrapper method for PrepareSQL class.

    Builds a PrepareSQL instance and returns its prepared statement.
    """
    preparer = PrepareSQL(sql, add_semicolon, invalid_starts)
    return preparer.prepared
python
{ "resource": "" }
q41865
PrepareSQL._get_next_occurrence
train
def _get_next_occurrence(haystack, offset, needles): """ Find next occurence of one of the needles in the haystack :return: tuple of (index, needle found) or: None if no needle was found""" # make map of first char to full needle (only works if all needles # have different first characters) firstcharmap = dict([(n[0], n) for n in needles]) firstchars = firstcharmap.keys() while offset < len(haystack): if haystack[offset] in firstchars: possible_needle = firstcharmap[haystack[offset]] if haystack[offset:offset + len(possible_needle)] == possible_needle: return offset, possible_needle offset += 1 return None
python
{ "resource": "" }
q41866
render_template_directory
train
def render_template_directory(deck, arguments):
    """Render a template directory

    Creates an output directory named after the deck title, copies the
    presentation-type template files and any local ``resources`` folder
    into it, renders the deck into ``index.html`` and returns the output
    directory path.

    NOTE(review): if an interactive user declines the delete prompt the
    existing directory is kept and the subsequent ``copytree`` will
    presumably fail because the destination exists -- confirm intended.
    """
    output_directory = dir_name_from_title(deck.title)
    # Clear a pre-existing output directory (ask first when on a TTY).
    if os.path.exists(output_directory):
        if sys.stdout.isatty():
            if ask( '%s already exists, shall I delete it?' % output_directory, arguments.get('--noinput') ):
                shutil.rmtree(output_directory)
        else:
            shutil.rmtree(output_directory)
    # copy support files to output directory
    template_directory_path = ( '%s/templates/%s' % (remarkable.__path__[0], deck.presentation_type) )
    shutil.copytree( template_directory_path, output_directory, )
    # copy resources
    if os.path.exists('resources'):
        log.info('Copying resources')
        shutil.copytree('resources', '%s/resources' % output_directory)
    else:
        log.info('No resources to copy')
    # render template
    template_filename = '%s/index.html' % deck.presentation_type
    html = render_template(template_filename, deck.json)
    # write index to output directory
    index_filename = '%s/index.html' % output_directory
    write_file(index_filename, html)
    return output_directory
python
{ "resource": "" }
q41867
main
train
def main():
    """Entry point: harvest free working proxies and print them.

    Builds a GreyHarvester from the CLI-provided configuration and prints
    at most ``num_proxies`` proxies from its stream.
    """
    # Parse CLI arguments into the runtime configuration.
    configs = setup(argparse.ArgumentParser())
    harvester = GreyHarvester(
        test_domain=configs['test_domain'],
        test_sleeptime=TEST_SLEEPTIME,
        https_only=configs['https_only'],
        allowed_countries=configs['allowed_countries'],
        denied_countries=configs['denied_countries'],
        ports=configs['ports'],
        max_timeout=configs['max_timeout']
    )
    # Harvest proxies from the web, stopping after num_proxies of them.
    for emitted, proxy in enumerate(harvester.run()):
        if emitted >= configs['num_proxies']:
            break
        print(proxy)
python
{ "resource": "" }
q41868
GreyHarvester._extract_proxies
train
def _extract_proxies(self, ajax_endpoint):
    """Fetch the AJAX endpoint and yield one Proxy per HTML table row.

    Parses the XML/HTML payload returned by ``ajax_endpoint``: the proxy
    table lives inside a <quote> element, whose rows are split on raw
    '<tr>'/'<td>' markers rather than parsed structurally.

    NOTE(review): ``print(proxy_xml.content)`` looks like leftover debug
    output. Also ``https=bool(...)`` is True for ANY non-empty cell text
    (including "no"/"false") -- confirm the source always emits an empty
    cell for non-HTTPS proxies.
    """
    ''' request the xml object '''
    proxy_xml = requests.get(ajax_endpoint)
    print(proxy_xml.content)
    root = etree.XML(proxy_xml.content)
    quote = root.xpath('quote')[0]
    ''' extract the raw text from the body of the quote tag '''
    raw_text = quote.text
    ''' eliminate the stuff we don't need '''
    proxy_data = raw_text.split('You will definitely love it! Give it a try!</td></tr>')[1]
    ''' get rid of the </table> at the end of proxy_data '''
    proxy_data = proxy_data[:-len('</table>')]
    ''' split proxy_data into rows '''
    table_rows = proxy_data.split('<tr>')
    ''' convert each row into a Proxy object '''
    for row in table_rows:
        ''' get rid of the </tr> at the end of each row '''
        row = row[:-len('</tr>')]
        ''' split each row into a list of items '''
        items = row.split('<td>')
        ''' sometimes we get weird lists containing only an empty string '''
        if len(items) != 7:
            continue
        ''' we'll use this to remove the </td> from the end of each item '''
        tdlen = len('</td>')
        ''' create proxy dict '''
        proxy = Proxy( ip=items[1][:-tdlen], port=int(items[2][:-tdlen]), https=bool(items[3][:-tdlen]), latency=int(items[4][:-tdlen]), last_checked=items[5][:-tdlen], country=items[6][:-tdlen], )
        yield proxy
python
{ "resource": "" }
q41869
GreyHarvester._passes_filter
train
def _passes_filter(self, proxy):
    """Return True if *proxy* satisfies every configured filter.

    Filters applied in order: allowed countries, denied countries,
    HTTPS-only, and allowed ports.

    NOTE(review): this mixes mapping access (``proxy['country']``,
    ``proxy['https']``) with attribute access (``proxy.port``) --
    presumably Proxy supports both; confirm against its definition.
    NOTE(review): ``proxy['https'] == False`` (rather than a truthiness
    test) means an https value of None would NOT be filtered -- confirm
    whether that is intended.
    """
    ''' avoid redudant and space consuming calls to 'self' '''
    ''' validate proxy based on provided filters '''
    if self.allowed_countries is not None and proxy['country'] not in self.allowed_countries:
        return False
    if self.denied_countries is not None and proxy['country'] in self.denied_countries:
        return False
    if self.https_only and proxy['https'] == False:
        return False
    if not self.all_ports and str(proxy.port) not in self.ports:
        return False
    return True
python
{ "resource": "" }
q41870
MappingList.iteritems
train
def iteritems(self):
    """
    Iterates over all mappings

    Yields
    ------
    (int,Mapping)
        The next pair (index, mapping)
    """
    for mapping in self.mappings:
        index = self.indexes[mapping.clause][mapping.target]
        yield index, mapping
python
{ "resource": "" }
q41871
Mapping.from_str
train
def from_str(cls, string):
    """
    Creates a mapping from a string

    Parameters
    ----------
    string : str
        String of the form `target<-clause` where `clause` is a valid
        string for :class:`caspo.core.clause.Clause`

    Returns
    -------
    caspo.core.mapping.Mapping
        Created object instance

    Raises
    ------
    ValueError
        If the string does not contain the `<-` separator
    """
    if "<-" not in string:
        raise ValueError("Cannot parse the given string to a mapping")
    head, tail = string.split('<-')
    return cls(Clause.from_str(tail), head)
python
{ "resource": "" }
q41872
LoginsAPI.list_user_logins_users
train
def list_user_logins_users(self, user_id):
    """
    List user logins.

    Given a user ID, return that user's logins for the given account.
    """
    path = {"user_id": user_id}
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/users/{user_id}/logins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{user_id}/logins".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q41873
LoginsAPI.create_user_login
train
def create_user_login(self, user_id, account_id, login_unique_id, login_authentication_provider_id=None, login_integration_id=None, login_password=None, login_sis_user_id=None):
    """
    Create a user login.

    Create a new login for an existing user in the given account.
    Setting sis_user_id / integration_id requires SIS permissions on the
    account; authentication_provider_id may be the provider's integer ID
    or its type name.
    """
    path = {"account_id": account_id}
    params = {}
    # Required form fields.
    data = {
        "user[id]": user_id,
        "login[unique_id]": login_unique_id,
    }
    # Optional form fields are only sent when explicitly provided.
    for field, value in (
        ("login[password]", login_password),
        ("login[sis_user_id]", login_sis_user_id),
        ("login[integration_id]", login_integration_id),
        ("login[authentication_provider_id]", login_authentication_provider_id),
    ):
        if value is not None:
            data[field] = value
    self.logger.debug("POST /api/v1/accounts/{account_id}/logins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/logins".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q41874
LoginsAPI.edit_user_login
train
def edit_user_login(self, id, account_id, login_integration_id=None, login_password=None, login_sis_user_id=None, login_unique_id=None):
    """
    Edit a user login.

    Update an existing login for a user in the given account. Setting
    sis_user_id / integration_id requires SIS permissions on the account.
    """
    path = {"account_id": account_id, "id": id}
    data = {}
    params = {}
    # Optional form fields are only sent when explicitly provided.
    for field, value in (
        ("login[unique_id]", login_unique_id),
        ("login[password]", login_password),
        ("login[sis_user_id]", login_sis_user_id),
        ("login[integration_id]", login_integration_id),
    ):
        if value is not None:
            data[field] = value
    self.logger.debug("PUT /api/v1/accounts/{account_id}/logins/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/accounts/{account_id}/logins/{id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q41875
LoginsAPI.delete_user_login
train
def delete_user_login(self, id, user_id):
    """
    Delete a user login.

    Delete an existing login.
    """
    path = {"user_id": user_id, "id": id}
    data = {}
    params = {}
    self.logger.debug("DELETE /api/v1/users/{user_id}/logins/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/{user_id}/logins/{id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q41876
QuizQuestionsAPI.list_questions_in_quiz_or_submission
train
def list_questions_in_quiz_or_submission(self, quiz_id, course_id, quiz_submission_attempt=None, quiz_submission_id=None):
    """
    List questions in a quiz or a submission.

    Returns the list of QuizQuestions in this quiz. When
    quiz_submission_id is given (which requires quiz_submission_attempt
    as well), returns the questions as presented for that submission
    instead of the latest quiz version.
    """
    path = {"course_id": course_id, "quiz_id": quiz_id}
    data = {}
    params = {}
    # Optional query parameters are only sent when explicitly provided.
    for field, value in (
        ("quiz_submission_id", quiz_submission_id),
        ("quiz_submission_attempt", quiz_submission_attempt),
    ):
        if value is not None:
            params[field] = value
    self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q41877
QuizQuestionsAPI.create_single_quiz_question
train
def create_single_quiz_question(self, quiz_id, course_id, question_answers=None, question_correct_comments=None, question_incorrect_comments=None, question_neutral_comments=None, question_points_possible=None, question_position=None, question_question_name=None, question_question_text=None, question_question_type=None, question_quiz_group_id=None, question_text_after_answers=None):
    """
    Create a single quiz question.

    Create a new quiz question for this quiz. All question fields are
    optional; question_question_type, when given, must be one of the
    Canvas question type identifiers.
    """
    path = {"course_id": course_id, "quiz_id": quiz_id}
    params = {}
    data = {}
    # The question type is a closed enum; validate it when supplied.
    if question_question_type is not None:
        self._validate_enum(question_question_type, ["calculated_question", "essay_question", "file_upload_question", "fill_in_multiple_blanks_question", "matching_question", "multiple_answers_question", "multiple_choice_question", "multiple_dropdowns_question", "numerical_question", "short_answer_question", "text_only_question", "true_false_question"])
    # Optional form fields, sent only when explicitly provided.
    for field, value in (
        ("question[question_name]", question_question_name),
        ("question[question_text]", question_question_text),
        ("question[quiz_group_id]", question_quiz_group_id),
        ("question[question_type]", question_question_type),
        ("question[position]", question_position),
        ("question[points_possible]", question_points_possible),
        ("question[correct_comments]", question_correct_comments),
        ("question[incorrect_comments]", question_incorrect_comments),
        ("question[neutral_comments]", question_neutral_comments),
        ("question[text_after_answers]", question_text_after_answers),
        ("question[answers]", question_answers),
    ):
        if value is not None:
            data[field] = value
    self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/questions".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q41878
Http.request
train
def request(self, action, data={}, headers={}, method='GET'):
    """ Append the user authentication details to every incoming request """
    credentials = {'user': self.username, 'password': self.password, 'api_id': self.apiId}
    data = self.merge(data, credentials)
    return Transport.request(self, action, data, headers, method)
python
{ "resource": "" }
q41879
SpecialCoverage.clean_publish_dates
train
def clean_publish_dates(self):
    """
    If an end_date value is provided, it must come with a start_date
    and be strictly after it.
    """
    if not self.end_date:
        return
    if not self.start_date:
        raise ValidationError("""The End Date requires a Start Date value.""")
    if self.end_date <= self.start_date:
        raise ValidationError("""The End Date must not precede the Start Date.""")
python
{ "resource": "" }
q41880
SpecialCoverage.clean_videos
train
def clean_videos(self):
    """
    Validates that all values in the video list are integer ids and
    removes all None values.
    """
    if not self.videos:
        return
    self.videos = [int(video) for video in self.videos if video is not None and is_valid_digit(video)]
python
{ "resource": "" }
q41881
SpecialCoverage.clean_super_features
train
def clean_super_features(self):
    """
    Removes any null & non-integer values from the super feature list
    """
    if not self.super_features:
        return
    self.super_features = [int(feature) for feature in self.super_features if feature is not None and is_valid_digit(feature)]
python
{ "resource": "" }
q41882
SpecialCoverage._save_percolator
train
def _save_percolator(self):
    """Index this special coverage's content query as an Elasticsearch percolator.

    Bails out silently when the serialized content filter has no "query"
    clause, since there is no known way to percolate such a filter.
    """
    index = Content.search_objects.mapping.index
    # Serialize the (unpublished-inclusive) content query for this coverage.
    query_filter = self.get_content(published=False).to_dict()
    q = {}
    if "query" in query_filter:
        q = {"query": query_filter.get("query", {})}
    else:
        # We don't know how to save this
        return
    # We'll need this data, to decide which special coverage section to use
    q["sponsored"] = bool(self.tunic_campaign_id)
    # Elasticsearch v1.4 percolator "field_value_factor" does not
    # support missing fields, so always need to include
    q["start_date"] = self.start_date
    # NOTE: set end_date to datetime.max if special coverage has no end date
    # (i.e. is a neverending special coverage)
    q["end_date"] = self.end_date if self.end_date else datetime.max.replace(tzinfo=pytz.UTC)
    # Elasticsearch v1.4 percolator range query does not support DateTime range queries
    # (PercolateContext.nowInMillisImpl is not implemented).
    # Store epoch-seconds copies so range comparisons can be done on numbers.
    if q["start_date"]:
        q['start_date_epoch'] = datetime_to_epoch_seconds(q["start_date"])
    if q["end_date"]:
        q['end_date_epoch'] = datetime_to_epoch_seconds(q["end_date"])
    # Store manually included IDs for percolator retrieval scoring (boost
    # manually included content).
    if self.query:
        q['included_ids'] = self.query.get('included_ids', [])
    # NOTE(review): assumes self.es_id uniquely identifies this coverage's
    # percolator document so repeated saves overwrite rather than duplicate —
    # confirm against es_id's definition.
    es.index(
        index=index,
        doc_type=".percolator",
        body=q,
        id=self.es_id
    )
python
{ "resource": "" }
q41883
SpecialCoverage.custom_template_name
train
def custom_template_name(self):
    """Build the path of the custom template for this special coverage.

    The base directory comes from ``settings.CUSTOM_SPECIAL_COVERAGE_PATH``
    (falling back to ``"special_coverage/custom"``); the file name is derived
    from the slug with dashes replaced by underscores.
    """
    configured = getattr(settings, "CUSTOM_SPECIAL_COVERAGE_PATH", "special_coverage/custom")
    base_path = "" if configured is None else configured
    slug_name = self.slug.replace("-", "_")
    template = "{0}/{1}_custom.html".format(base_path, slug_name)
    # Drop a leading slash left over when the base path is empty.
    return template.lstrip("/")
python
{ "resource": "" }
q41884
ExclusiveBooleanField.deconstruct
train
def deconstruct(self):
    """Serialize this field for Django 1.7+ migrations.

    Re-adds the custom ``on`` keyword argument so the migration framework
    can reconstruct the field; South and earlier Django versions are covered
    by the add_introspection_rules section at the bottom of this file.
    """
    name, path, args, kwargs = super(ExclusiveBooleanField, self).deconstruct()
    on_fields = self._on_fields
    if on_fields:
        kwargs['on'] = on_fields
    return name, path, args, kwargs
python
{ "resource": "" }
q41885
Command.sync_apps
train
def sync_apps(self, connection, app_labels):
    """Run the old syncdb-style table creation for the given app labels.

    Creates tables (without migrations) for every not-yet-installed model of
    the listed apps, inside one atomic block when the backend supports
    transactional DDL. Per-model deferred SQL is collected and executed once
    at the end. Returns the set of model classes whose tables were created.
    """
    cursor = connection.cursor()
    try:
        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names(cursor)
        created_models = set()

        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (app_config.label,
             router.get_migratable_models(app_config, connection.alias, include_auto_created=False))
            for app_config in apps.get_app_configs()
            if app_config.models_module is not None and app_config.label in app_labels
        ]

        def model_installed(model):
            # True when neither the model's own table nor its auto-created
            # table already exists in the database.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            # Note that if a model is unmanaged we short-circuit and never try to install it
            return not (
                (converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
            )

        manifest = OrderedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models
        )

        # Create the tables for each model
        if self.verbosity >= 1:
            self.stdout.write(" Creating tables...\n")
        with transaction.atomic(using=connection.alias, savepoint=connection.features.can_rollback_ddl):
            with connection.schema_editor() as editor:
                # Create the template schema and load the clone-schema SQL
                # script into it. NOTE(review): assumes
                # settings.POSTGRES_TEMPLATE_SCHEMA does not exist yet — confirm.
                editor.execute("CREATE SCHEMA {}".format(settings.POSTGRES_TEMPLATE_SCHEMA))
                statements = editor.connection.ops.prepare_sql_script(CLONE_SCHEMA)
                for statement in statements:
                    editor.execute(statement, params=None)
            # Deferred SQL is accumulated across schema editors and run once below.
            schema_deferred_sql = {}
            with connection.schema_editor() as editor:
                # The schema model itself is created first so other models can rely on it.
                schema_model = apps.get_model(settings.POSTGRES_SCHEMA_MODEL)
                editor.create_model(schema_model, verbosity=self.verbosity)
                schema_deferred_sql.update(editor.schema_deferred_sql)
                editor.schema_deferred_sql = {}
                created_models.add(schema_model)
            for app_name, model_list in manifest.items():
                for model in model_list:
                    if not model._meta.can_migrate(connection):
                        continue
                    if model in created_models:
                        continue  # probably schema model
                    if self.verbosity >= 3:
                        self.stdout.write(
                            " Processing %s.%s model\n" % (app_name, model._meta.object_name)
                        )
                    with connection.schema_editor() as editor:
                        # Seed the editor with previously collected deferred SQL,
                        # then harvest whatever this model adds.
                        editor.schema_deferred_sql.update(schema_deferred_sql)
                        editor.create_model(model, verbosity=self.verbosity)
                        schema_deferred_sql.update(editor.schema_deferred_sql)
                        editor.schema_deferred_sql = {}
                    created_models.add(model)
            if self.verbosity >= 1:
                self.stdout.write("\n Running deferred SQL...\n")
            with connection.schema_editor() as editor:
                # Exiting this editor executes the accumulated deferred SQL.
                editor.schema_deferred_sql = schema_deferred_sql
    finally:
        cursor.close()
    return created_models
python
{ "resource": "" }
q41886
CloneData.get_database_rows
train
def get_database_rows(self, tables=None, database=None):
    """Map each table name to the list of rows it currently contains.

    Falls back to the connection's current database and full table list when
    *database* / *tables* are not given.
    """
    src = database or self.database
    tbls = tables or self.tables
    # Build the SELECT statements first, then run them in a second pass.
    select_commands = self._get_select_commands(src, tbls)
    return self._execute_select_commands(src, select_commands)
python
{ "resource": "" }
q41887
CloneData._get_select_commands
train
def _get_select_commands(self, source, tables):
    """Build SELECT statements for every table of a source database.

    :param source: Source database name
    :param tables: Iterable of table names
    :return: List of (table, command) tuples, one per statement
    """
    progress = tqdm(tables, total=len(tables), desc='Getting {0} select queries'.format(source))
    queries = {}
    for tbl in progress:
        command = self.select_all(tbl, execute=False)
        # select_all may hand back a single statement string; normalize to a list.
        queries[tbl] = [command] if isinstance(command, str) else command
    flattened = []
    for tbl, cmds in queries.items():
        for cmd in cmds:
            flattened.append((tbl, cmd))
    return flattened
python
{ "resource": "" }
q41888
CloneData._execute_select_commands
train
def _execute_select_commands(self, source, commands):
    """Run prepared SELECT statements and collect the rows keyed by table name."""
    collected = {}
    progress = tqdm(commands, total=len(commands), desc='Executing {0} select queries'.format(source))
    for tbl, command in progress:
        fetched = self.fetch(command, commit=True)
        collected.setdefault(tbl, []).extend(fetched)
    # Flush once after the whole batch.
    self._commit()
    return collected
python
{ "resource": "" }
q41889
CloneData.get_database_columns
train
def get_database_columns(self, tables=None, database=None):
    """Map each table name to its column list.

    Falls back to the connection's current database and full table list when
    *database* / *tables* are not given.
    """
    src = database or self.database
    tbls = tables or self.tables
    columns = {}
    for tbl in tqdm(tbls, total=len(tbls), desc='Getting {0} columns'.format(src)):
        columns[tbl] = self.get_columns(tbl)
    return columns
python
{ "resource": "" }
q41890
CloneData._get_insert_commands
train
def _get_insert_commands(self, rows, cols):
    """Build INSERT statements per table from fetched rows and columns.

    NOTE: consumes (pops) entries from *rows* and *cols* as it goes, so each
    table's data is released once its statement has been generated.
    """
    table_names = list(rows.keys())
    insert_queries = {}
    for table in tqdm(table_names, total=len(table_names), desc='Getting insert rows queries'):
        insert_queries[table] = {}
        table_rows = rows.pop(table)
        table_cols = cols.pop(table)
        # Multi-row tables get a bulk statement; single-row tables a plain insert;
        # empty tables keep an empty entry.
        if len(table_rows) > 1:
            insert_queries[table]['insert_many'] = self.insert_many(table, table_cols, table_rows, execute=False)
        elif len(table_rows) == 1:
            insert_queries[table]['insert'] = self.insert(table, table_cols, table_rows, execute=False)
    return insert_queries
python
{ "resource": "" }
q41891
CloneDatabase.copy_database_structure
train
def copy_database_structure(self, source, destination, tables=None):
    """Recreate the structure of the source database's tables in the destination."""
    # Switch to the source db so self.tables reflects it when no explicit list is given.
    self.change_db(source)
    table_list = self.tables if tables is None else tables
    # Structure is created while connected to the destination db.
    self.change_db(destination)
    progress = tqdm(table_list, total=len(table_list), desc='Copying {0} table structure'.format(source))
    for table in progress:
        self.copy_table_structure(source, destination, table)
python
{ "resource": "" }
q41892
CloneDatabase.copy_table_structure
train
def copy_table_structure(self, source, destination, table):
    """Clone one table's schema (no data) from *source* into *destination*.

    :param source: Source database
    :param destination: Destination database
    :param table: Table name
    """
    statement = 'CREATE TABLE {0}.{1} LIKE {2}.{1}'.format(destination, wrap(table), source)
    self.execute(statement)
python
{ "resource": "" }
q41893
CloneDatabase.copy_database_data
train
def copy_database_data(self, source, destination, optimized=False):
    """Copy all table data from one database to another.

    Existing data is retrieved from the source database and inserted into
    the destination database.

    :param source: Source database name
    :param destination: Destination database name
    :param optimized: When True, copy server-side with combined
        ``INSERT INTO ... SELECT`` statements instead of fetching rows to
        the client first
    """
    # Suppress per-statement printing for the bulk copy and restore the
    # caller's previous setting afterwards — even on failure. The original
    # code unconditionally set enable_printing back to True, clobbering a
    # caller's False and leaving printing disabled if an exception escaped;
    # this now matches the save/restore pattern used in Clone.copy_database.
    previous_printing = self.enable_printing
    self.enable_printing = False
    try:
        # Read the table list while connected to the source database.
        self.change_db(source)
        tables = self.tables
        if optimized:
            # Copy database data by executing INSERT and SELECT in a single query.
            self._copy_database_data_serverside(source, destination, tables)
        else:
            # Generate and execute SELECT and INSERT commands client-side.
            self._copy_database_data_clientside(tables, source, destination)
    finally:
        self.enable_printing = previous_printing
python
{ "resource": "" }
q41894
CloneDatabase._copy_database_data_serverside
train
def _copy_database_data_serverside(self, source, destination, tables):
    """Move rows table-by-table using single combined INSERT ... SELECT queries."""
    progress = tqdm(tables, total=len(tables), desc='Copying table data (optimized)')
    for table in progress:
        statement = 'INSERT INTO {0}.{1} SELECT * FROM {2}.{1}'.format(destination, wrap(table), source)
        self.execute(statement)
python
{ "resource": "" }
q41895
CloneDatabase._copy_database_data_clientside
train
def _copy_database_data_clientside(self, tables, source, destination):
    """Copy table data by fetching rows client-side and re-inserting them.

    :param tables: Iterable of table names to copy
    :param source: Source database name
    :param destination: Destination database name
    :raises ValueError: If data is returned for a table that was not requested
    """
    # Retrieve rows and columns for every table from the source database.
    rows = self.get_database_rows(tables, source)
    cols = self.get_database_columns(tables, source)

    # Sanity-check: every fetched key must correspond to a requested table.
    # Raise explicitly rather than `assert` so the check survives `python -O`.
    unexpected = [key for key in list(rows.keys()) + list(cols.keys()) if key not in tables]
    if unexpected:
        raise ValueError('Fetched data for tables that were not requested: {0}'.format(unexpected))

    # Insert statements are executed against the destination database.
    self.change_db(destination)
    insert_queries = self._get_insert_commands(rows, cols)
    self._execute_insert_commands(insert_queries)
python
{ "resource": "" }
q41896
Clone.copy_database
train
def copy_database(self, source, destination):
    """Duplicate an entire database (schema plus rows) under another name.

    Timing notes versus the separate structure/data helpers:
      - SMALL databases (< 5mb): optimized ~178% faster, one_query ~200% faster
      - LARGE databases (> 5mb): optimized ~900% faster, one_query ~2600% faster

    :param source: Source database
    :param destination: Destination database
    """
    print('\tCopying database {0} structure and data to database {1}'.format(source, destination))
    with Timer('\nSuccess! Copied database {0} to {1} in '.format(source, destination)):
        # Reuse (after emptying) the destination when it already exists,
        # otherwise create it fresh.
        if destination in self.databases:
            self.truncate_database(destination)
        else:
            self.create_database(destination)

        # Capture the table list while connected to the source database.
        self.change_db(source)
        source_tables = self.tables

        # All DDL/DML below runs against the destination database.
        self.change_db(destination)
        print('\n')

        # Silence per-statement output during the bulk copy; restore it after.
        saved_printing = self.enable_printing
        self.enable_printing = False

        structure_progress = tqdm(source_tables, total=len(source_tables),
                                  desc='Copying {0} table structures'.format(source))
        for table in structure_progress:
            self.execute('CREATE TABLE {0}.{1} LIKE {2}.{1}'.format(destination, wrap(table), source))

        data_progress = tqdm(source_tables, total=len(source_tables),
                             desc='Copying {0} table data'.format(source))
        for table in data_progress:
            self.execute('INSERT INTO {0}.{1} SELECT * FROM {2}.{1}'.format(destination, wrap(table), source))

        self.enable_printing = saved_printing
python
{ "resource": "" }
q41897
CuReSim.recode_curesim_reads
train
def recode_curesim_reads(
    curesim_fastq_fo,
    rnf_fastq_fo,
    fai_fo,
    genome_id,
    number_of_read_tuples=10**9,
    recode_random=False,
):
    """Recode CuReSim output FASTQ file to the RNF-compatible output FASTQ file.

    Args:
        curesim_fastq_fo (file object): File object of CuReSim FASTQ file.
        rnf_fastq_fo (file object): File object of RNF FASTQ.
        fai_fo (file object): File object for FAI file of the reference genome.
        genome_id (int): RNF genome ID to be used.
        number_of_read_tuples (int): Expected number of read tuples (to estimate number of digits in RNF).
        recode_random (bool): Recode random reads (only reads matching this
            random/non-random flag are written to the output).

    Raises:
        ValueError
    """

    curesim_pattern = re.compile('@(.*)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)')
    """
    CuReSim read name format

    @<#1>_<#2>_<#3>_<#4>_<#5>_<#6>_<#7>_<#8>

    1: contig name
    2: original position
    3: strand (0=forward;1=reverse)
    4: random read (0=non-random;1=random)
    5: number of insertions
    6: number of deletions
    7: number of substitution
    8: read number (unique within a genome)
    """
    # NOTE(review): max_seq_len is never updated or read below — appears unused.
    max_seq_len = 0
    fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
    # Width (in hex digits) needed to encode the expected number of read tuples.
    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
    fq_creator = rnftools.rnfformat.FqCreator(
        fastq_fo=rnf_fastq_fo,
        read_tuple_id_width=read_tuple_id_width,
        genome_id_width=2,
        chr_id_width=fai_index.chr_id_width,
        coor_width=fai_index.coor_width,
        info_reads_in_tuple=True,
        info_simulator="curesim",
    )
    # parsing FQ file
    # FASTQ records are 4 lines each; i % 4 selects the phase:
    # 0 = header, 1 = bases, 2 = separator ('+'), 3 = qualities.
    read_tuple_id = 0
    i = 0
    for line in curesim_fastq_fo:
        if i % 4 == 0:
            # Header line: decode the CuReSim read-name fields (see format above).
            m = curesim_pattern.search(line)
            if m is None:
                # NOTE(review): presumably rnftools.utils.error raises the given
                # exception; the m.group calls below would fail otherwise — confirm.
                rnftools.utils.error(
                    "Read '{}' was not generated by CuReSim.".format(line[1:]),
                    program="RNFtools",
                    subprogram="MIShmash",
                    exception=ValueError
                )
            contig_name = m.group(1)
            start_pos = int(m.group(2))
            direction = "R" if int(m.group(3)) else "F"
            random = bool(m.group(4))
            ins_nb = int(m.group(5))
            del_nb = int(m.group(6))
            subst_nb = int(m.group(7))
            rd_id = int(m.group(8))
            # Partial end coordinate; the read length is added in the bases phase.
            end_pos = start_pos - 1 - ins_nb + del_nb
            chr_id = 0
            # Overrides the header flag above: randomness is detected from the
            # contig name prefix instead of field #4.
            random = contig_name[:4] == "rand"
            # TODO: uncomment when the chromosome naming bug in curesim is corrected
            # chr_id = self.dict_chr_ids[contig_name] if self.dict_chr_ids!={} else "0"
        elif i % 4 == 1:
            # Bases line: finalize coordinates and build the RNF segment.
            bases = line.strip()
            end_pos += len(bases)
            if recode_random:
                # Random reads carry no meaningful coordinates.
                left = 0
                right = 0
            else:
                left = start_pos + 1
                right = end_pos
            segment = rnftools.rnfformat.Segment(
                genome_id=genome_id,
                chr_id=chr_id,
                direction=direction,
                left=left,
                right=right,
            )
        elif i % 4 == 2:
            # Separator line ('+') — nothing to do.
            pass
        elif i % 4 == 3:
            # Qualities line: emit the read if its random flag matches the mode.
            qualities = line.strip()
            if random == recode_random:
                fq_creator.add_read(
                    read_tuple_id=read_tuple_id,
                    bases=bases,
                    qualities=qualities,
                    segments=[segment],
                )
                read_tuple_id += 1
        i += 1
    fq_creator.flush_read_tuple()
python
{ "resource": "" }
q41898
get_config_dir
train
def get_config_dir(program='', system_wide=False):
    '''Get the configuration directory.

    Get the configuration directories, optionally for a specific program.

    Args:
        program (str): The name of the program whose configuration
            directories have to be found.
        system_wide (bool): Gets the system-wide configuration directories.

    Returns:
        list: A list of all matching configuration directories found.
    '''
    # BUGFIX: the original only imported winreg in the non-system-wide branch,
    # so the system-wide Windows path raised NameError on the first
    # winreg.ExpandEnvironmentStrings call. Import it up front for either branch.
    if os.name == 'nt':
        import winreg
    config_homes = []
    if system_wide:
        if os.name == 'nt':
            config_homes.append(
                winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))
        else:
            config_homes.append('/etc')
            config_homes.append('/etc/xdg')
            if os.name == 'darwin':
                config_homes.append('/Library')
    else:
        if os.name == 'nt':
            config_homes.append(
                winreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))
            config_homes.append(
                os.path.join(
                    winreg.ExpandEnvironmentStrings('%APPDATA%'),
                    'Roaming'))
        else:
            # Honor $XDG_CONFIG_HOME, then the xdg library, then ~/.config.
            if os.getenv('XDG_CONFIG_HOME'):
                config_homes.append(os.getenv('XDG_CONFIG_HOME'))
            else:
                try:
                    from xdg import BaseDirectory
                    config_homes.append(BaseDirectory.xdg_config_home)
                except ImportError:
                    config_homes.append(os.path.expanduser('~/.config'))
            config_homes.append(os.path.expanduser('~'))
            if os.name == 'darwin':
                config_homes.append(os.path.expanduser('~/Library'))
    if program:

        def __find_homes(app, dirs):
            # Collect existing "<dir>/<app>", "<dir>/.<app>" and "<dir>/<app>.d"
            # directories for each candidate base directory.
            homes = []
            for home in dirs:
                if os.path.isdir(os.path.join(home, app)):
                    homes.append(os.path.join(home, app))
                if os.path.isdir(os.path.join(home, '.' + app)):
                    homes.append(os.path.join(home, '.' + app))
                if os.path.isdir(os.path.join(home, app + '.d')):
                    homes.append(os.path.join(home, app + '.d'))
            return homes

        app_homes = __find_homes(program, config_homes)
        # Special Cases: programs whose directories use alternate names.
        if program == 'vim':
            app_homes.extend(__find_homes('vimfiles', config_homes))
        elif program == 'chrome':
            app_homes.extend(__find_homes('google-chrome', config_homes))
        elif program in ['firefox', 'thunderbird']:
            app_homes.extend(
                __find_homes(
                    program, [
                        os.path.expanduser('~/.mozilla')]))
        return app_homes
    return config_homes
python
{ "resource": "" }
q41899
get_config_file
train
def get_config_file(program, system_wide=False):
    '''Locate configuration files belonging to a program.

    Scans the generic configuration directories for plain files whose name
    starts with the program name (including the dot-prefixed hidden variant),
    then scans the program's own configuration directories. See also
    :func:`get_config_dir()`.

    Args:
        program (str): The program for which to get the configuration file.
        system_wide (bool): Whether to get the system-wide file for the program.

    Returns:
        list: A list of all matching configuration files found.
    '''
    program_dirs = get_config_dir(program, system_wide)
    generic_dirs = get_config_dir(system_wide=system_wide)

    def _matching_files(directory, prefix):
        # Plain files directly inside `directory` whose name starts with `prefix`.
        found = []
        for entry in os.listdir(directory):
            candidate = os.path.join(directory, entry)
            if os.path.isfile(candidate) and entry.startswith(prefix):
                found.append(candidate)
        return found

    matches = []
    for home in generic_dirs:
        matches.extend(_matching_files(home, program))
    # Also pick up hidden variants such as ".programrc" (guard prevents recursion).
    if not program.startswith('.'):
        matches.extend(get_config_file('.' + program, system_wide))
    for home in program_dirs:
        matches.extend(_matching_files(home, program))
    return matches
python
{ "resource": "" }