_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q42400
BaseSR._yield_all
train
def _yield_all(self, l): ''' Given a iterable like list or tuple the function yields each of its items with _yield ''' if l is not None: if type(l) in [list, tuple]: for f in l: for x in self._yield(f): yield x else: for x in self._yield(l): yield x
python
{ "resource": "" }
q42401
MoneyBird.post
train
def post(self, resource_path: str, data: dict, administration_id: int = None):
    """Send a POST request to the endpoint named by the resource path.

    POST requests are typically used to add new data.

    Example:
        >>> from moneybird import MoneyBird, TokenAuthentication
        >>> moneybird = MoneyBird(TokenAuthentication('access_token'))
        >>> data = {'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed'}
        >>> moneybird.post('webhooks', data, 123)
        {'id': '143274315994891267', 'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed', ...

    :param resource_path: The resource path.
    :param data: The data to send to the server.
    :param administration_id: The administration id (optional, depending on the resource path).
    :return: The decoded JSON response for the request.
    """
    endpoint = self._get_url(administration_id, resource_path)
    raw_response = self.session.post(url=endpoint, json=data)
    return self._process_response(raw_response)
python
{ "resource": "" }
q42402
MoneyBird.renew_session
train
def renew_session(self):
    """
    Clears all session data and starts a new session using the same settings as before.

    This method can be used to clear session data, e.g., cookies. Future requests will use a
    new session initiated with the same settings and authentication method.
    """
    self.session = self.authentication.get_session()
    self.session.headers.update({
        'User-Agent': 'MoneyBird for Python %s' % VERSION,
        'Accept': 'application/json',
    })
    # Log after the new session is in place so the message reflects reality
    # (the original logged "renewed" before anything had happened).
    logger.debug("API session renewed")
python
{ "resource": "" }
q42403
MoneyBird._get_url
train
def _get_url(cls, administration_id: int, resource_path: str): """ Builds the URL to the API endpoint specified by the given parameters. :param administration_id: The ID of the administration (may be None). :param resource_path: The path to the resource. :return: The absolute URL to the endpoint. """ url = urljoin(cls.base_url, '%s/' % cls.version) if administration_id is not None: url = urljoin(url, '%s/' % administration_id) url = urljoin(url, '%s.json' % resource_path) return url
python
{ "resource": "" }
q42404
MoneyBird._process_response
train
def _process_response(response: requests.Response, expected: tuple = ()) -> dict:
    """
    Processes an API response. Raises an exception when appropriate.

    The exception that will be raised is MoneyBird.APIError. This exception is subclassed so
    implementing programs can easily react appropriately to different exceptions.

    The following subclasses of MoneyBird.APIError are likely to be raised:
      - MoneyBird.Unauthorized: No access to the resource or invalid authentication
      - MoneyBird.Throttled: Access (temporarily) denied, please try again
      - MoneyBird.NotFound: Resource not found, check resource path
      - MoneyBird.InvalidData: Validation errors occured while processing your input
      - MoneyBird.ServerError: Error on the server

    :param response: The response to process.
    :param expected: Status codes which won't raise an exception.
        (Default changed from a mutable ``[]`` to an immutable ``()``;
        membership tests are unaffected.)
    :return: The useful data in the response (may be None).
    """
    # Map of known status codes to the exception to raise (None = success).
    responses = {
        200: None,
        201: None,
        204: None,
        400: MoneyBird.Unauthorized,
        401: MoneyBird.Unauthorized,
        403: MoneyBird.Throttled,
        404: MoneyBird.NotFound,
        406: MoneyBird.NotFound,
        422: MoneyBird.InvalidData,
        429: MoneyBird.Throttled,
        500: MoneyBird.ServerError,
    }
    logger.debug("API request: %s %s\n" % (response.request.method, response.request.url) +
                 "Response: %s %s" % (response.status_code, response.text))
    if response.status_code not in expected:
        if response.status_code not in responses:
            logger.error("API response contained unknown status code")
            raise MoneyBird.APIError(response, "API response contained unknown status code")
        elif responses[response.status_code] is not None:
            # Best-effort extraction of the server-supplied error description.
            try:
                description = response.json()['error']
            except (AttributeError, TypeError, KeyError, ValueError):
                description = None
            raise responses[response.status_code](response, description)
    try:
        data = response.json()
    except ValueError:
        logger.error("API response is not JSON decodable")
        data = None
    return data
python
{ "resource": "" }
q42405
VideohubVideo.search_videohub
train
def search_videohub(cls, query, filters=None, status=None, sort=None, size=None, page=None):
    """Search the videohub for the given query, applying optional filters.

    :see: https://github.com/theonion/videohub/blob/master/docs/search/post.md
    :see: https://github.com/theonion/videohub/blob/master/docs/search/get.md

    :param query: query terms to search by (str)
    :param filters: video field value restrictions, e.g. {"channel": "onion"} (dict)
    :param status: limit results to "published", "scheduled" or "draft" videos (str)
    :param sort: video field sorting, e.g. {"title": "desc"} (dict)
    :param size: page size, i.e. number of results (int)
    :param page: page number of the results; use together with `size` (int)
    :return: a dictionary of results and meta information
    :rtype: dict
    """
    # endpoint + auth headers
    search_url = getattr(settings, "VIDEOHUB_API_SEARCH_URL", cls.DEFAULT_VIDEOHUB_API_SEARCH_URL)
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": settings.VIDEOHUB_API_TOKEN,
    }

    # build the request body, validating each optional argument's type
    body = {"query": query}
    if filters:
        assert isinstance(filters, dict)
        body["filters"] = filters
    if status:
        assert isinstance(status, six.string_types)
        body.setdefault("filters", {})
        body["filters"]["status"] = status
    if sort:
        assert isinstance(sort, dict)
        body["sort"] = sort
    if size:
        assert isinstance(size, (six.string_types, int))
        body["size"] = size
    if page:
        assert isinstance(page, (six.string_types, int))
        body["page"] = page

    # fire the request; anything but 200 raises via requests
    response = requests.post(search_url, data=json.dumps(body), headers=request_headers)
    if response.status_code != 200:
        response.raise_for_status()
    return json.loads(response.content)
python
{ "resource": "" }
q42406
VideohubVideo.get_hub_url
train
def get_hub_url(self):
    """Return the canonical consumer-ui detail path for this video on the hub.

    :return: the path to the consumer ui detail page of the video
    :rtype: str
    """
    base = getattr(settings, "VIDEOHUB_VIDEO_URL", self.DEFAULT_VIDEOHUB_VIDEO_URL)

    # slugify needs ascii input
    ascii_title = ""
    if isinstance(self.title, str):
        ascii_title = self.title
    elif six.PY2 and isinstance(self.title, six.text_type):
        # Legacy unicode conversion
        ascii_title = self.title.encode('ascii', 'replace')

    slug = slugify("{}-{}".format(ascii_title, self.id))
    return base.format(slug)
python
{ "resource": "" }
q42407
VideohubVideo.get_embed_url
train
def get_embed_url(self, targeting=None, recirc=None):
    """Return the canonical path for an embedded iframe of this video.

    :return: the path to create an embedded iframe of the video
    :rtype: str
    """
    template = getattr(settings, "VIDEOHUB_EMBED_URL", self.DEFAULT_VIDEOHUB_EMBED_URL)
    embed_url = template.format(self.id)
    if targeting is not None:
        # sort for a deterministic query-string order
        for key, value in sorted(targeting.items()):
            embed_url += '&{0}={1}'.format(key, value)
    if recirc is not None:
        embed_url += '&recirc={0}'.format(recirc)
    return embed_url
python
{ "resource": "" }
q42408
VideohubVideo.get_api_url
train
def get_api_url(self):
    """Return the canonical api detail url of this video on the hub.

    :return: the path to the api detail of the video
    :rtype: str
    """
    api_url = getattr(settings, 'VIDEOHUB_API_URL', None)
    if not api_url:
        # Fall back to the alternate setting used by most client projects.
        base = getattr(settings, 'VIDEOHUB_API_BASE_URL', None)
        if base:
            api_url = base.rstrip('/') + '/videos/{}'
    if not api_url:
        api_url = self.DEFAULT_VIDEOHUB_API_URL
    return api_url.format(self.id)
python
{ "resource": "" }
q42409
OutcomeGroupsAPI.create_subgroup_global
train
def create_subgroup_global(self, id, title, description=None, vendor_guid=None):
    """
    Create a subgroup.

    Creates a new empty subgroup under the outcome group with the given
    title and description.
    """
    path = {"id": id}  # REQUIRED path parameter
    params = {}

    # REQUIRED - title of the new outcome group
    data = {"title": title}
    # OPTIONAL - description of the new outcome group
    if description is not None:
        data["description"] = description
    # OPTIONAL - custom GUID for the learning standard
    if vendor_guid is not None:
        data["vendor_guid"] = vendor_guid

    self.logger.debug("POST /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42410
GradingPeriodsAPI.update_single_grading_period
train
def update_single_grading_period(self, id, course_id, grading_periods_end_date, grading_periods_start_date, grading_periods_weight=None):
    """
    Update a single grading period.

    Update an existing grading period.
    """
    path = {"course_id": course_id, "id": id}  # REQUIRED path parameters
    params = {}

    data = {
        # REQUIRED - the dates the grading period starts and ends
        "grading_periods[start_date]": grading_periods_start_date,
        "grading_periods[end_date]": grading_periods_end_date,
    }
    # OPTIONAL - weight contributing to the overall weight of the grading
    # period set, used to compute how much this period's assignments count
    # toward the total grade
    if grading_periods_weight is not None:
        data["grading_periods[weight]"] = grading_periods_weight

    self.logger.debug("PUT /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42411
AppointmentGroupsAPI.get_next_appointment
train
def get_next_appointment(self, appointment_group_ids=None):
    """
    Get next appointment.

    Return the next appointment available to sign up for. The appointment
    is returned in a one-element array. If no future appointments are
    available, an empty array is returned.
    """
    path = {}
    data = {}
    params = {}

    # OPTIONAL - ids of appointment groups to search
    if appointment_group_ids is not None:
        params["appointment_group_ids"] = appointment_group_ids

    self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42412
Dataset.to_funset
train
def to_funset(self, discrete):
    """
    Converts the dataset to a set of `gringo.Fun`_ instances

    Parameters
    ----------
    discrete : callable
        A discretization function

    Returns
    -------
    set
        Representation of the dataset as a set of `gringo.Fun`_ instances


    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    fs = self.clampings.to_funset("exp")
    fs = fs.union(self.setup.to_funset())
    for i, row in self.readouts.iterrows():
        # Series.items() replaces Series.iteritems(), which was removed in
        # pandas 2.0 (items() is available in older pandas as well).
        for var, val in row.items():
            # NaN readouts mean "no observation" and are skipped
            if not np.isnan(val):
                fs.add(gringo.Fun('obs', [i, var, discrete(val)]))
    return fs
python
{ "resource": "" }
q42413
set_dump_directory
train
def set_dump_directory(base=None, sub_dir=None):
    """Create (and return) a timestamped directory for dumping SQL commands.

    :param base: Base directory to create the dump tree under.
    :param sub_dir: Optional sub-directory name; a trailing file extension,
        if present, is stripped (e.g. 'data.sql' -> 'data').
    :return: Path of the created timestamped directory.
    """
    # Current timestamp, used as the leaf directory name
    timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H-%M-%S')

    # Strip a file extension from sub_dir
    if sub_dir and '.' in sub_dir:
        sub_dir = sub_dir.rsplit('.', 1)[0]

    # makedirs creates every missing intermediate directory in one call,
    # resolving the old TODO about recursive directory creation.
    dump_dir = os.path.join(base, sub_dir) if sub_dir else base
    dump_dir = os.path.join(dump_dir, timestamp)
    os.makedirs(dump_dir, exist_ok=True)
    return dump_dir
python
{ "resource": "" }
q42414
dump_commands
train
def dump_commands(commands, directory=None, sub_dir=None):
    """
    Dump SQL commands to .sql files.

    :param commands: List of SQL commands
    :param directory: Directory to dump commands to
    :param sub_dir: Sub directory
    :return: Directory failed commands were dumped to (a path string, or a
        TemporaryDirectory handle when no directory was given)
    """
    print('\t' + str(len(commands)), 'failed commands')

    # Resolve the directory the commands will be written into
    if directory and os.path.isfile(directory):
        dump_dir = set_dump_directory(os.path.dirname(directory), sub_dir)
        return_dir = dump_dir
    elif directory:
        dump_dir = set_dump_directory(directory, sub_dir)
        return_dir = dump_dir
    else:
        # BUG FIX: the original created two distinct TemporaryDirectory
        # objects -- the one providing dump_dir was garbage-collected (and
        # its directory deleted) immediately, and the returned handle pointed
        # at a different, empty directory.  Keep a single object alive so
        # dump_dir exists and matches the returned handle.
        return_dir = TemporaryDirectory()
        dump_dir = return_dir.name

    # (command, destination path) tuples, one numbered .sql file per command
    command_filepath = [(fail, os.path.join(dump_dir, str(count) + '.sql'))
                        for count, fail in enumerate(commands)]

    # Dump failed commands to text files in that directory; utilize the
    # multiprocessing module when it is available
    timer = Timer()
    if MULTIPROCESS:
        pool = Pool(cpu_count())
        pool.map(write_text_tup, command_filepath)
        pool.close()
        print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end),
              '\n\t\tMethod : (multiprocessing)\n\t\tDirectory : {0}'.format(dump_dir))
    else:
        for tup in command_filepath:
            write_text_tup(tup)
        print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end),
              '\n\t\tMethod : (sequential)\n\t\tDirectory : {0}'.format(dump_dir))

    # Return base directory of dumped commands
    return return_dir
python
{ "resource": "" }
q42415
write_text
train
def write_text(_command, txt_file):
    """Write a single SQL command, stripped of surrounding whitespace, to a text file."""
    stripped = _command.strip()
    with open(txt_file, 'w') as handle:
        handle.writelines(stripped)
python
{ "resource": "" }
q42416
get_commands_from_dir
train
def get_commands_from_dir(directory, zip_backup=True, remove_dir=True):
    """Read back SQL commands dumped to a directory by dump_commands().

    :param directory: Directory containing numbered ``.sql`` files.
    :param zip_backup: When True, archive the directory with ZipBackup after reading.
    :param remove_dir: When True, delete the directory after reading.
    :return: List of SQL command strings, in their original dump order.
    """
    def _dump_order(fp):
        # dump_commands() names files by enumerate() index ('0.sql',
        # '1.sql', ...), so sort numerically when possible: plain
        # lexicographic sorting would put '10.sql' before '2.sql' and
        # scramble the original command order.  Non-numeric names fall
        # back to lexicographic order after the numeric ones.
        stem = os.path.splitext(os.path.basename(fp))[0]
        return (0, int(stem), fp) if stem.isdigit() else (1, 0, fp)

    failed_scripts = sorted(
        (os.path.join(directory, fn) for fn in os.listdir(directory) if fn.endswith('.sql')),
        key=_dump_order)

    # Read each failed SQL file and collect its contents
    print('\tReading SQL scripts from files')
    commands = []
    for sql_file in failed_scripts:
        with open(sql_file, 'r') as txt:
            commands.append(txt.read())

    # Optionally archive, then remove, the directory after reading
    if zip_backup:
        ZipBackup(directory).backup()
    if remove_dir:
        shutil.rmtree(directory)
    return commands
python
{ "resource": "" }
q42417
ParameterSet.blueprint
train
def blueprint(self):
    """
    blueprint support, returns a partial dictionary

    :return: dict with 'type' (dotted class path) and 'fields'
        (attribute name -> attribute blueprint)
    :raises TypeError: if an attribute (or an Array's element template)
        is not one of the supported prestans types
    """
    blueprint = dict()
    blueprint['type'] = "%s.%s" % (self.__module__, self.__class__.__name__)

    # Fields
    fields = dict()

    # inspects the attributes of a parameter set and tries to validate the input
    for attribute_name, type_instance in self.getmembers():

        # must be one of the supported scalar types or an Array
        # (isinstance with a tuple replaces the chained not-isinstance checks;
        # the error-message literals previously embedded a run of
        # line-continuation whitespace, fixed here)
        if not isinstance(type_instance, (String, Float, Integer, Date, DateTime, Array)):
            raise TypeError(
                "%s should be instance of prestans.types.String/Integer/Float/Date/DateTime/Array"
                % attribute_name)

        # Array element templates are restricted to scalar types
        if isinstance(type_instance, Array) and \
                not isinstance(type_instance.element_template, (String, Float, Integer)):
            raise TypeError(
                "%s should be instance of prestans.types.String/Integer/Float/Array"
                % attribute_name)

        fields[attribute_name] = type_instance.blueprint()

    blueprint['fields'] = fields
    return blueprint
python
{ "resource": "" }
q42418
ParameterSet.validate
train
def validate(self, request):
    """
    validate method for %ParameterSet

    Since the introduction of ResponseFieldListParser, the parameter
    _response_field_list will be ignored, this is a prestans reserved
    parameter, and cannot be used by apps.

    :param request: The request object to be validated
    :type request: webob.request.Request

    :return: The validated parameter set
    :rtype: ParameterSet

    :raises TypeError: if an attribute is not a supported prestans type
    :raises prestans.exception.ValidationError: if an input fails validation
    """
    validated_parameter_set = self.__class__()

    # Inspects the attributes of a parameter set and tries to validate the input
    for attribute_name, type_instance in self.getmembers():

        # Must be one of the supported scalar types or an Array
        # (isinstance with a tuple replaces the chained checks; the message
        # literals previously embedded line-continuation whitespace)
        if not isinstance(type_instance, (String, Float, Integer, Date, DateTime, Array)):
            raise TypeError(
                "%s should be of type prestans.types.String/Integer/Float/Date/DateTime/Array"
                % attribute_name)

        # Array element templates are restricted to scalar types
        if isinstance(type_instance, Array) and \
                not isinstance(type_instance.element_template, (String, Float, Integer)):
            raise TypeError(
                "%s elements should be of type prestans.types.String/Integer/Float"
                % attribute_name)

        # Initialized up front so the except clause below can never see an
        # unbound name.
        validation_input = None
        try:
            if isinstance(type_instance, Array):
                # getall returns an empty list if the key is missing
                validation_input = request.params.getall(attribute_name)
            else:
                # getone raises KeyError if the key is missing
                try:
                    validation_input = request.params.getone(attribute_name)
                except KeyError:
                    validation_input = None

            # Validate input based on data type rules; raises
            # DataValidationException if validation fails
            validation_result = type_instance.validate(validation_input)
            setattr(validated_parameter_set, attribute_name, validation_result)
        except exception.DataValidationException as exp:
            raise exception.ValidationError(
                message=str(exp),
                attribute_name=attribute_name,
                value=validation_input,
                blueprint=type_instance.blueprint())

    return validated_parameter_set
python
{ "resource": "" }
q42419
preload_pages
train
def preload_pages():
    """Register all pages before the first application request."""
    try:
        page_urls = [page.url for page in Page.query.all()]
        _add_url_rule(page_urls)
    except Exception:  # pragma: no cover
        current_app.logger.warn('Pages were not loaded.')
        raise
python
{ "resource": "" }
q42420
render_page
train
def render_page(path):
    """Internal interface to the page view.

    :param path: Page path.
    :returns: The rendered template.
    """
    # NOTE(review): the lookup uses request.path rather than the ``path``
    # argument -- presumably equivalent under this route; confirm.
    try:
        page = Page.get_by_url(request.path)
    except NoResultFound:
        abort(404)
    candidate_templates = [page.template_name, current_app.config['PAGES_DEFAULT_TEMPLATE']]
    return render_template(candidate_templates, page=page)
python
{ "resource": "" }
q42421
handle_not_found
train
def handle_not_found(exception, **extra):
    """Custom blueprint exception handler."""
    assert isinstance(exception, NotFound)

    # Look the requested path up both with and without a trailing slash
    page = Page.query.filter(
        db.or_(Page.url == request.path, Page.url == request.path + "/")
    ).first()

    if page:
        # Register the rule so future requests bypass the 404 handler
        _add_url_rule(page.url)
        return render_template(
            [page.template_name, current_app.config['PAGES_DEFAULT_TEMPLATE']],
            page=page,
        )
    if 'wrapped' in extra:
        return extra['wrapped'](exception)
    return exception
python
{ "resource": "" }
q42422
_add_url_rule
train
def _add_url_rule(url_or_urls):
    """Register URL rule(s) on the application URL map.

    :param url_or_urls: A single URL string or an iterable of URL strings.
    """
    old = current_app._got_first_request
    # This is bit of cheating to overcome @flask.app.setupmethod decorator.
    current_app._got_first_request = False
    if isinstance(url_or_urls, six.string_types):
        url_or_urls = [url_or_urls]
    # BUG FIX: under Python 3 ``map`` is lazy, so the original
    # ``map(lambda url: ..., url_or_urls)`` never executed and no rules
    # were registered; use an explicit loop instead.
    for url in url_or_urls:
        current_app.add_url_rule(url, 'invenio_pages.view', view)
    current_app._got_first_request = old
python
{ "resource": "" }
q42423
CollaborationsAPI.list_members_of_collaboration
train
def list_members_of_collaboration(self, id, include=None):
    """
    List members of a collaboration.

    List the collaborators of a given collaboration
    """
    path = {"id": id}  # REQUIRED path parameter
    data = {}
    params = {}

    # OPTIONAL - include:
    #   "collaborator_lti_id": identifier for the member in an LTI context
    #   "avatar_image_url": avatar url for a collaborator with type 'user'
    if include is not None:
        self._validate_enum(include, ["collaborator_lti_id", "avatar_image_url"])
        params["include"] = include

    self.logger.debug("GET /api/v1/collaborations/{id}/members with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/collaborations/{id}/members".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42424
DiscussionTopicsAPI.list_discussion_topics_courses
train
def list_discussion_topics_courses(self, course_id, exclude_context_module_locked_topics=None, include=None, only_announcements=None, order_by=None, scope=None, search_term=None):
    """
    List discussion topics.

    Returns the paginated list of discussion topics for this course or group.
    """
    path = {"course_id": course_id}  # REQUIRED path parameter
    data = {}
    params = {}

    # OPTIONAL - include: "all_dates" adds all dates associated with graded
    # discussions' assignments
    if include is not None:
        self._validate_enum(include, ["all_dates"])
        params["include"] = include

    # OPTIONAL - order_by: ordering of the topic list; defaults to "position"
    if order_by is not None:
        self._validate_enum(order_by, ["position", "recent_activity"])
        params["order_by"] = order_by

    # OPTIONAL - scope: restrict to topics in the given state(s); filtering is
    # done after pagination, so pages may be smaller than requested; multiple
    # states can be passed as a comma separated string
    if scope is not None:
        self._validate_enum(scope, ["locked", "unlocked", "pinned", "unpinned"])
        params["scope"] = scope

    # OPTIONAL - only_announcements: return announcements instead of
    # discussion topics; defaults to false
    if only_announcements is not None:
        params["only_announcements"] = only_announcements

    # OPTIONAL - search_term: partial title of the topics to match
    if search_term is not None:
        params["search_term"] = search_term

    # OPTIONAL - exclude_context_module_locked_topics: for students, exclude
    # topics locked by module progression; defaults to false
    if exclude_context_module_locked_topics is not None:
        params["exclude_context_module_locked_topics"] = exclude_context_module_locked_topics

    self.logger.debug("GET /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42425
DiscussionTopicsAPI.create_new_discussion_topic_courses
train
def create_new_discussion_topic_courses(self, course_id, allow_rating=None, assignment=None, attachment=None, delayed_post_at=None, discussion_type=None, group_category_id=None, is_announcement=None, lock_at=None, message=None, only_graders_can_rate=None, pinned=None, podcast_enabled=None, podcast_has_student_posts=None, position_after=None, published=None, require_initial_post=None, sort_by_rating=None, title=None):
    """
    Create a new discussion topic.

    Create an new discussion topic for the course or group.  All form
    fields are optional; notable semantics (per the Canvas API docs):
    discussion_type is 'side_comment' (one level of nested comments) or
    'threaded' (fully threaded) and defaults to side_comment;
    delayed_post_at/lock_at schedule publication and locking;
    podcast_has_student_posts implies podcast_enabled; assignment is a
    sub-object per the Create-an-Assignment API (its name is taken from
    the discussion title; pass set_assignment = false to detach);
    is_announcement requires announcement-posting permissions; attachment
    is a multipart/form-data form field and attachments larger than 1
    kilobyte are subject to quota restrictions.
    """
    path = {"course_id": course_id}  # REQUIRED path parameter
    params = {}

    # discussion_type is the only form field with an enum restriction
    if discussion_type is not None:
        self._validate_enum(discussion_type, ["side_comment", "threaded"])

    # Optional form fields in the documented order; only non-None values
    # are sent to the server.
    form_fields = [
        ("title", title),
        ("message", message),
        ("discussion_type", discussion_type),
        ("published", published),
        ("delayed_post_at", delayed_post_at),
        ("lock_at", lock_at),
        ("podcast_enabled", podcast_enabled),
        ("podcast_has_student_posts", podcast_has_student_posts),
        ("require_initial_post", require_initial_post),
        ("assignment", assignment),
        ("is_announcement", is_announcement),
        ("pinned", pinned),
        ("position_after", position_after),
        ("group_category_id", group_category_id),
        ("allow_rating", allow_rating),
        ("only_graders_can_rate", only_graders_can_rate),
        ("sort_by_rating", sort_by_rating),
        ("attachment", attachment),
    ]
    data = {key: value for key, value in form_fields if value is not None}

    self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42426
DiscussionTopicsAPI.delete_topic_groups
train
def delete_topic_groups(self, group_id, topic_id):
    """
    Delete a topic.

    Deletes the discussion topic. This will also delete the assignment,
    if it's an assignment discussion.
    """
    path = {"group_id": group_id, "topic_id": topic_id}  # REQUIRED path parameters
    data = {}
    params = {}

    self.logger.debug("DELETE /api/v1/groups/{group_id}/discussion_topics/{topic_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/groups/{group_id}/discussion_topics/{topic_id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42427
DiscussionTopicsAPI.rate_entry_courses
train
def rate_entry_courses(self, topic_id, entry_id, course_id, rating=None):
    """
    Rate entry.

    Rate a discussion entry. On success, the response will be 204 No
    Content with an empty body.
    """
    path = {"course_id": course_id, "topic_id": topic_id, "entry_id": entry_id}  # REQUIRED
    data = {}
    params = {}

    # OPTIONAL - rating: only 0 and 1 are accepted
    if rating is not None:
        data["rating"] = rating

    self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42428
cli
train
def cli(**settings):
    """Notify about new reviews in AppStore and Google Play in slack.

    Launch command using supervisor or using screen/tmux/etc.
    Reviews are fetched for multiple apps and languages in --beat=300 interval.
    """
    setup_logging(settings)
    settings = setup_languages(settings)
    channels = setup_channel_map(settings)
    app = CriticApp(**dict(settings, channels=channels))

    # Optional error reporting via Sentry.
    if settings['sentry_dsn']:
        app.sentry_client = Client(settings['sentry_dsn'])
        logger.debug('Errors are reported to %s' % settings['sentry_dsn'])
    else:
        app.sentry_client = None

    # NOTE(review): the --version / missing-platform early exits happen after
    # the app and Sentry client are already constructed -- confirm that this
    # ordering is intentional.
    if settings['version']:
        click.echo('Version %s' % critics.__version__)
        return
    if not (settings['ios'] or settings['android']):
        click.echo('Please choose either --ios or --android')
        return

    loop = tornado.ioloop.IOLoop.instance()

    # When a persisted model exists we already know past reviews, so new ones
    # found on the first poll are genuinely new and should be notified.
    if app.load_model():
        logger.debug('Model loaded OK, not skipping notify on first run')
        notify = True
    else:
        notify = False

    # Schedule periodic polling for each enabled store.
    if settings['ios']:
        logger.info('Tracking IOS apps: %s', ', '.join(settings['ios']))
        itunes = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'ios'), 1000 * settings['beat'], loop)
        itunes.start()
    if settings['android']:
        logger.info('Tracking Android apps: %s', ', '.join(settings['android']))
        google_play = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'android'), 1000 * settings['beat'], loop)
        google_play.start()
    echo_channel_map(channels)

    # Immediate first poll before the periodic callbacks kick in.
    if settings['ios']:
        app.poll_store('ios', notify=notify)
    if settings['android']:
        app.poll_store('android', notify=notify)

    # Optional Prometheus metrics endpoint.
    if settings['stats']:
        port = int(settings['stats'])
        logger.debug('Serving metrics server on port %s' % port)
        start_http_server(port)

    # Without --daemonize the command polls once and exits.
    if settings['daemonize']:
        loop.start()
python
{ "resource": "" }
q42429
Source.create_fa
train
def create_fa(self):
    """Create a FASTA file with extracted sequences.

    When no sequence subset is configured (``self._seqs is None``) the
    source FASTA is exposed unchanged via a symlink; otherwise only the
    requested sequences are copied, re-wrapped at 80 columns.
    """
    if self._seqs is None:
        # No extraction needed -- link to the original file.
        os.symlink(self._fa0_fn, self._fa_fn)
    else:
        in_seqs = pyfaidx.Fasta(self._fa0_fn)
        with open(self._fa_fn, "w+") as g:
            for seq_desc in self._seqs:
                # pyfaidx raises KeyError for unknown sequence names.
                x = in_seqs[seq_desc]
                name, seq = x.name, str(x)
                g.write(">" + name + "\n")
                # Standard FASTA line width.
                n = 80
                seq_split = "\n".join([seq[i:i + n] for i in range(0, len(seq), n)])
                g.write(seq_split + "\n")
python
{ "resource": "" }
q42430
Source.recode_sam_reads
train
def recode_sam_reads(
    sam_fn,
    fastq_rnf_fo,
    fai_fo,
    genome_id,
    number_of_read_tuples=10**9,
    simulator_name=None,
    allow_unmapped=False,
):
    """Transform a SAM file to RNF-compatible FASTQ.

    Args:
        sam_fn (str): SAM/BAM file - file name.
        fastq_rnf_fo (str): Output FASTQ file - file object.
        fai_fo (str): FAI index of the reference genome - file object.
        genome_id (int): Genome ID for RNF.
        number_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id).
        simulator_name (str): Name of the simulator. Used for comment in read tuple name.
        allow_unmapped (bool): Allow unmapped reads.

    Raises:
        NotImplementedError
    """
    fai_index = rnftools.utils.FaIdx(fai_fo)
    # Width (in hex digits) needed to encode the largest expected tuple id.
    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))

    fq_creator = rnftools.rnfformat.FqCreator(
        fastq_fo=fastq_rnf_fo,
        read_tuple_id_width=read_tuple_id_width,
        genome_id_width=2,
        chr_id_width=fai_index.chr_id_width,
        coor_width=fai_index.coor_width,
        info_reads_in_tuple=True,
        info_simulator=simulator_name,
    )

    # todo: check if clipping corrections is well implemented
    # Matches CIGAR operations that consume reference positions.
    cigar_reg_shift = re.compile("([0-9]+)([MDNP=X])")

    # todo: other upac codes
    reverse_complement_dict = {
        "A": "T",
        "T": "A",
        "C": "G",
        "G": "C",
        "N": "N",
    }

    read_tuple_id = 0
    last_read_tuple_name = None
    with pysam.AlignmentFile(
            sam_fn,
            check_header=False,
    ) as samfile:
        for alignment in samfile:
            # Consecutive alignments sharing a query name belong to the same
            # read tuple; a new name starts the next tuple id.
            if alignment.query_name != last_read_tuple_name and last_read_tuple_name is not None:
                read_tuple_id += 1
            last_read_tuple_name = alignment.query_name

            if alignment.is_unmapped:
                # NOTE(review): the ``allow_unmapped`` parameter is accepted
                # but never honoured here -- unmapped segments always abort.
                # Confirm intended behaviour before relying on that flag.
                rnftools.utils.error(
                    "SAM files used for conversion should not contain unaligned segments. "
                    "This condition is broken by read tuple "
                    "'{}' in file '{}'.".format(alignment.query_name, sam_fn),
                    program="RNFtools",
                    subprogram="MIShmash",
                    exception=NotImplementedError,
                )

            if alignment.is_reverse:
                direction = "R"
                # Reverse-complement so bases are reported in original
                # sequencing orientation.
                bases = "".join([reverse_complement_dict[nucl] for nucl in alignment.seq[::-1]])
                qualities = str(alignment.qual[::-1])
            else:
                direction = "F"
                bases = alignment.seq[:]
                qualities = str(alignment.qual[:])

            # todo: are chromosomes in bam sorted correctly (the same order as in FASTA)?
            if fai_index.dict_chr_ids != {}:
                chr_id = fai_index.dict_chr_ids[samfile.getrname(alignment.reference_id)]
            else:
                chr_id = "0"

            # Convert 0-based reference_start to 1-based RNF coordinates and
            # advance ``right`` by every reference-consuming CIGAR operation.
            left = int(alignment.reference_start) + 1
            right = left - 1
            for (steps, operation) in cigar_reg_shift.findall(alignment.cigarstring):
                right += int(steps)

            segment = rnftools.rnfformat.Segment(
                genome_id=genome_id,
                chr_id=chr_id,
                direction=direction,
                left=left,
                right=right,
            )

            fq_creator.add_read(
                read_tuple_id=read_tuple_id,
                bases=bases,
                qualities=qualities,
                segments=[segment],
            )

    # Write out whatever remains buffered for the final tuple.
    fq_creator.flush_read_tuple()
python
{ "resource": "" }
q42431
instantiate
train
def instantiate(config):
    """
    Instantiate all registered vodka applications.

    Args:
        config (dict or MungeConfig): configuration object
    """
    for app_handle, app_cfg in list(config["apps"].items()):
        # Applications default to enabled unless explicitly switched off.
        if not app_cfg.get("enabled", True):
            continue
        app_cls = get_application(app_handle)
        instances[app_cls.handle] = app_cls(app_cfg)
python
{ "resource": "" }
q42432
compile_resource
train
def compile_resource(resource):
    """Return compiled regex for resource matching."""
    # Rewrite ":name" placeholders into named capture groups, then anchor the
    # whole pattern and allow an optional trailing query string.
    with_groups = re.sub(r":(\w+)", r"(?P<\1>[\w-]+?)", resource)
    pattern = "^" + trim_resource(with_groups) + r"(\?(?P<querystring>.*))?$"
    return re.compile(pattern)
python
{ "resource": "" }
q42433
handle
train
def handle(data_type, data, data_id=None, caller=None):
    """
    Execute all data handlers on the specified data according to data type.

    Args:
        data_type (str): data type handle
        data (dict or list): data

    Kwargs:
        data_id (str): can be used to differentiate between different data
            sets of the same data type. If not specified will default to the
            data type
        caller (object): if specified, holds the object or function that is
            trying to handle data

    Returns:
        dict or list - data after handlers have been executed on it
    """
    data_id = data_id or data_type

    # Handlers for a data id are instantiated once and cached.
    if data_id not in _handlers:
        instantiated = handlers.instantiate_for_data_type(data_type, data_id=data_id)
        _handlers[data_id] = {h.handle: h for h in instantiated}

    for handler in list(_handlers[data_id].values()):
        try:
            data = handler(data, caller=caller)
        except Exception:
            # A failing handler is logged and skipped so the remaining
            # handlers still run on the data.
            vodka.log.error("Data handler '%s' failed with error" % handler)
            vodka.log.error(traceback.format_exc())

    return data
python
{ "resource": "" }
q42434
ReadingListMixin.validate_query
train
def validate_query(self, query):
    """Confirm query exists given common filters."""
    if query is None:
        return None
    return self.update_reading_list(query)
python
{ "resource": "" }
q42435
ReadingListMixin.get_validated_augment_query
train
def get_validated_augment_query(self, augment_query=None):
    """
    Common rules for reading list augmentation hierarchy.

    1. Sponsored Content.
    2. Video Content.
    """
    augment_query = self.validate_query(augment_query)

    # Given an invalid query, reach for a Sponsored query.
    if not augment_query:
        augment_query = self.validate_query(Content.search_objects.sponsored())

    # Given an invalid Sponsored query, reach for a Video query.
    if not augment_query:
        # Channels listed in settings.READING_LIST_CONFIG are kept out of the
        # evergreen-video fallback.
        reading_list_config = getattr(settings, "READING_LIST_CONFIG", {})
        excluded_channel_ids = reading_list_config.get("excluded_channel_ids", [])
        augment_query = self.validate_query(Content.search_objects.evergreen_video(
            excluded_channel_ids=excluded_channel_ids
        ))
    return augment_query
python
{ "resource": "" }
q42436
ReadingListMixin.augment_reading_list
train
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False):
    """Apply injected logic for slicing reading lists with additional content.

    The two queries are made mutually exclusive (one is filtered to exclude
    the other's hits) and combined so the augmenting content occupies the
    first slot.
    """
    primary_query = self.validate_query(primary_query)
    augment_query = self.get_validated_augment_query(augment_query=augment_query)
    try:
        # We use this for cases like recent where queries are vague.
        if reverse_negate:
            primary_query = primary_query.filter(NegateQueryFilter(augment_query))
        else:
            augment_query = augment_query.filter(NegateQueryFilter(primary_query))

        augment_query = randomize_es(augment_query)
        return FirstSlotSlicer(primary_query, augment_query)
    except TransportError:
        # Elasticsearch hiccup: fall back to the plain, unaugmented list.
        return primary_query
python
{ "resource": "" }
q42437
ReadingListMixin.update_reading_list
train
def update_reading_list(self, reading_list):
    """Generic behaviors for reading lists before being rendered."""
    # Never recommend the piece of content the reader is currently on.
    reading_list = reading_list.filter(~es_filter.Ids(values=[self.id]))

    # Drop document types the site has opted out of recommending.
    reading_list_config = getattr(settings, "READING_LIST_CONFIG", {})
    for doc_type in reading_list_config.get("excluded_doc_types", []):
        reading_list = reading_list.filter(~es_filter.Type(value=doc_type))

    return reading_list
python
{ "resource": "" }
q42438
ReadingListMixin.get_reading_list_context
train
def get_reading_list_context(self, **kwargs):
    """Returns the context dictionary for a given reading list.

    The reading list source is chosen from ``self.reading_list_identifier``:
    "popular", "specialcoverage*", "section*", or a generic recent-news
    fallback.
    """
    reading_list = None
    context = {
        "name": "",
        "content": reading_list,
        "targeting": {},
        "videos": []
    }

    if self.reading_list_identifier == "popular":
        reading_list = popular_content()
        context.update({"name": self.reading_list_identifier})

        # Popular is augmented.
        reading_list = self.augment_reading_list(reading_list)
        context.update({"content": reading_list})
        return context

    if self.reading_list_identifier.startswith("specialcoverage"):
        special_coverage = SpecialCoverage.objects.get_by_identifier(
            self.reading_list_identifier
        )
        # Boost sponsored results, then order by score and recency.
        reading_list = special_coverage.get_content().query(
            SponsoredBoost(field_name="tunic_campaign_id")
        ).sort("_score", "-published")
        context["targeting"]["dfp_specialcoverage"] = special_coverage.slug
        if special_coverage.tunic_campaign_id:
            context["tunic_campaign_id"] = special_coverage.tunic_campaign_id
            context["targeting"].update({
                "dfp_campaign_id": special_coverage.tunic_campaign_id
            })
            # We do not augment sponsored special coverage lists.
            reading_list = self.update_reading_list(reading_list)
        else:
            reading_list = self.augment_reading_list(reading_list)
        context.update({
            "name": special_coverage.name,
            "videos": special_coverage.videos,
            "content": reading_list
        })
        return context

    if self.reading_list_identifier.startswith("section"):
        section = Section.objects.get_by_identifier(self.reading_list_identifier)
        reading_list = section.get_content()
        reading_list = self.augment_reading_list(reading_list)
        context.update({
            "name": section.name,
            "content": reading_list
        })
        return context

    # Fallback: generic recent content, augmented with the vague-query
    # behaviour (reverse_negate).
    reading_list = Content.search_objects.search()
    reading_list = self.augment_reading_list(reading_list, reverse_negate=True)
    context.update({
        "name": "Recent News",
        "content": reading_list
    })
    return context
python
{ "resource": "" }
q42439
Frontmatter.read
train
def read(cls, string):
    """Returns dict with separated frontmatter from string.

    Returned dict keys:
    attributes -- extracted YAML attributes in dict form.
    body -- string contents below the YAML separators
    frontmatter -- string representation of YAML
    """
    fmatter = ""
    body = ""
    result = cls._regex.search(string)
    if result:
        fmatter = result.group(1)
        body = result.group(2)
    # safe_load: frontmatter is plain data, and yaml.load() without an
    # explicit Loader can construct arbitrary Python objects from untrusted
    # input and raises TypeError on PyYAML >= 6.
    return {
        "attributes": yaml.safe_load(fmatter),
        "body": body,
        "frontmatter": fmatter,
    }
python
{ "resource": "" }
q42440
_analyze_case
train
def _analyze_case(model_dir, bench_dir, config):
    """Generates statistics from the timing summaries."""
    pattern = "*" + config["timing_ext"]
    model_timings = set(glob.glob(os.path.join(model_dir, pattern)))
    bench_timings = set()
    if bench_dir is not None:
        bench_timings = set(glob.glob(os.path.join(bench_dir, pattern)))

    # Nothing to analyze without model timing files.
    if not model_timings:
        return dict()

    return dict(
        model=generate_timing_stats(model_timings, config['timing_vars']),
        bench=generate_timing_stats(bench_timings, config['timing_vars']),
    )
python
{ "resource": "" }
q42441
generate_timing_stats
train
def generate_timing_stats(file_list, var_list):
    """
    Parse all of the timing files, and generate some statistics
    about the run.

    Args:
        file_list: A list of timing files to parse
        var_list: A list of variables to look for in the timing file

    Returns:
        A dict mapping each variable that was found to its summary
        statistics: {'mean': ..., 'max': ..., 'min': ..., 'std': ...}
    """
    timing_result = dict()
    timing_summary = dict()
    for timing_file in file_list:
        timing_result[timing_file] = functions.parse_gptl(timing_file, var_list)
    for var in var_list:
        # Collect the variable from every file that actually reported it.
        # (The original used a bare ``except: continue`` here, which also
        # swallowed KeyboardInterrupt/SystemExit; an explicit membership
        # test only skips genuinely missing variables.)
        var_time = [data[var] for data in timing_result.values() if var in data]
        if var_time:
            timing_summary[var] = {'mean': np.mean(var_time),
                                   'max': np.max(var_time),
                                   'min': np.min(var_time),
                                   'std': np.std(var_time)}
    return timing_summary
python
{ "resource": "" }
q42442
weak_scaling
train
def weak_scaling(timing_stats, scaling_var, data_points):
    """
    Generate data for plotting weak scaling. The data points keep a constant
    amount of work per processor for each data point.

    Args:
        timing_stats: the result of the generate_timing_stats function
        scaling_var: the variable to select from the timing_stats dictionary
            (can be provided in configurations via the 'scaling_var' key)
        data_points: the list of size and processor counts to use as data
            (can be provided in configurations via the 'weak_scaling_points' key)

    Returns:
        A dict of the form:
            {'bench' : {'mins' : [], 'means' : [], 'maxs' : []},
             'model' : {'mins' : [], 'means' : [], 'maxs' : []},
             'proc_counts' : []}
    """
    proc_counts = []
    series = {'model': {'means': [], 'mins': [], 'maxs': []},
              'bench': {'means': [], 'mins': [], 'maxs': []}}

    for point in data_points:
        size, proc = point[0], point[1]
        try:
            # Both model and bench stats must exist for a point to count.
            stats_by_case = {case: timing_stats[size][proc][case][scaling_var]
                             for case in ('model', 'bench')}
        except KeyError:
            continue
        proc_counts.append(proc)
        for case, stats in stats_by_case.items():
            series[case]['means'].append(stats['mean'])
            series[case]['mins'].append(stats['min'])
            series[case]['maxs'].append(stats['max'])

    timing_data = {case: dict(mins=vals['mins'], means=vals['means'], maxs=vals['maxs'])
                   for case, vals in series.items()}
    # Processor labels look like "p128"; strip the leading letter.
    timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts]
    return timing_data
python
{ "resource": "" }
q42443
generate_scaling_plot
train
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file):
    """
    Generate a scaling plot.

    Args:
        timing_data: data returned from a `*_scaling` method
        title: the title of the plot
        ylabel: the y-axis label of the plot
        description: a description of the plot
        plot_file: the file to write out to

    Returns:
        an image element containing the plot file and metadata
    """
    proc_counts = timing_data['proc_counts']
    # A meaningful scaling curve needs at least three data points.
    if len(proc_counts) > 2:
        plt.figure(figsize=(10, 8), dpi=150)
        plt.title(title)
        plt.xlabel("Number of processors")
        plt.ylabel(ylabel)

        for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']):
            case_data = timing_data[case]
            means = case_data['means']
            mins = case_data['mins']
            maxs = case_data['maxs']
            # Shaded min/max envelope with the mean plotted on top.
            plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5)
            plt.plot(proc_counts, means, 'o-', color=case_color, label=case)

        plt.legend(loc='best')
    else:
        # Placeholder figure explaining why no plot could be drawn.
        plt.figure(figsize=(5, 3))
        plt.axis('off')
        plt.text(0.4, 0.8, "ERROR:")
        plt.text(0.0, 0.6, "Not enough data points to draw scaling plot")
        plt.text(0.0, 0.44, "To generate this data rerun BATS with the")
        plt.text(0.0, 0.36, "performance option enabled.")

    # Publishing additionally emits a high-resolution EPS next to the PNG.
    if livvkit.publish:
        plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
    plt.savefig(plot_file)
    plt.close()
    return elements.image(title, description, os.path.basename(plot_file))
python
{ "resource": "" }
q42444
_InvenioPagesState.jinja_env
train
def jinja_env(self):
    """Create a sandboxed Jinja environment.

    The environment is built lazily on first access and cached on the
    instance; page templates are rendered in a sandbox so stored page
    source cannot reach arbitrary Python attributes.
    """
    if self._jinja_env is None:
        self._jinja_env = SandboxedEnvironment(
            extensions=[
                'jinja2.ext.autoescape',
                'jinja2.ext.with_',
            ],
            autoescape=True,
        )
        # url_for is the only helper exposed to page templates by default.
        self._jinja_env.globals['url_for'] = url_for

        # Load whitelisted configuration variables.
        for var in self.app.config['PAGES_WHITELIST_CONFIG_KEYS']:
            self._jinja_env.globals[var] = self.app.config.get(var)
    return self._jinja_env
python
{ "resource": "" }
q42445
_InvenioPagesState.render_template
train
def render_template(self, source, **kwargs_context):
    r"""Render a template string using sandboxed environment.

    :param source: A string containing the page source.
    :param \*\*kwargs_context: The context associated with the page.
    :returns: The rendered template.
    """
    template = self.jinja_env.from_string(source)
    return template.render(kwargs_context)
python
{ "resource": "" }
q42446
InvenioPages.wrap_errorhandler
train
def wrap_errorhandler(app):
    """Wrap error handler.

    :param app: The Flask application.

    Replaces the application's 404/NotFound handler with one that first
    tries to serve a static page, falling back to any previously
    registered handler.
    """
    try:
        existing_handler = app.error_handler_spec[None][404][NotFound]
    except (KeyError, TypeError):
        # No 404 handler registered yet (or the spec layout differs).
        existing_handler = None
    if existing_handler:
        # Chain: our handler runs first, delegating misses to the old one.
        app.error_handler_spec[None][404][NotFound] = \
            lambda error: handle_not_found(error, wrapped=existing_handler)
    else:
        # Ensure the nested spec dictionaries exist before assignment.
        app.error_handler_spec.setdefault(None, {}).setdefault(404, {})
        app.error_handler_spec[None][404][NotFound] = handle_not_found
python
{ "resource": "" }
q42447
ConferencesAPI.list_conferences_groups
train
def list_conferences_groups(self, group_id):
    """
    List conferences.

    Retrieve the list of conferences for this context

    This API returns a JSON object containing the list of conferences,
    the key for the list of conferences is "conferences"
    """
    # group_id is the only path parameter; results are paginated.
    path = {"group_id": group_id}
    data = {}
    params = {}

    self.logger.debug("GET /api/v1/groups/{group_id}/conferences with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/groups/{group_id}/conferences".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42448
QuizAssignmentOverridesAPI.retrieve_assignment_overridden_dates_for_quizzes
train
def retrieve_assignment_overridden_dates_for_quizzes(self, course_id, quiz_assignment_overrides_0_quiz_ids=None):
    """
    Retrieve assignment-overridden dates for quizzes.

    Retrieve the actual due-at, unlock-at, and available-at dates for quizzes
    based on the assignment overrides active for the current API user.
    """
    path = {"course_id": course_id}
    data = {}
    params = {}

    # Optional filter: restrict the result to the given quiz ids; otherwise
    # the API returns overrides for every quiz visible to the user.
    if quiz_assignment_overrides_0_quiz_ids is not None:
        params["quiz_assignment_overrides[0][quiz_ids]"] = quiz_assignment_overrides_0_quiz_ids

    self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/assignment_overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/assignment_overrides".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42449
KNeighborsClassifier.fit
train
def fit(self, X, y):
    """Fit the model using X as training data and y as target values."""
    # A lazy learner: "fitting" just memorizes the training set.
    self._data = X
    self._labels = y
    self._classes = np.unique(y)
    self._is_fitted = True
python
{ "resource": "" }
q42450
KNeighborsClassifier.predict
train
def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features). Test samples. Returns ------- y : array of shape [n_samples] Class labels for each data sample. """ # TODO: Make classification of multiple samples a bit more effective... if X.ndim > 1 and X.shape[1] != 1: out = [] for x in X: out += self.predict(x) return out X = X.flatten() if self.metric == 'minkowski': dists = np.sum(np.abs(self._data - X) ** self.p, axis=1) else: # TODO: Implement other metrics. raise ValueError("Only Minkowski distance metric implemented...") argument = np.argsort(dists) labels = self._labels[argument[:self.n_neighbors]] if self.weights == 'distance': weights = 1 / dists[argument[:self.n_neighbors]] out = np.zeros((len(self._classes), ), 'float') for i, c in enumerate(self._classes): out[i] = np.sum(weights[labels == c]) out /= np.sum(out) y_pred = self._labels[np.argmax(out)] else: y_pred, _ = mode(labels) return y_pred.tolist()
python
{ "resource": "" }
q42451
build_url
train
def build_url(component, filename, **values):
    """
    search bower asset and build url

    :param component: bower component (package)
    :type component: str
    :param filename: filename in bower component - can contain directories (like dist/jquery.js)
    :type filename: str
    :param values: additional url parameters
    :type values: dict[str, str]
    :return: url
    :rtype: str | None
    """
    root = current_app.config['BOWER_COMPONENTS_ROOT']
    bower_data = None
    package_data = None

    # check if component exists in bower_components directory
    if not os.path.isdir(os.path.join(current_app.root_path, root, component)):
        # FallBack to default url_for flask
        return None

    # load bower.json of specified component
    bower_file_path = os.path.join(current_app.root_path, root, component, 'bower.json')
    if os.path.exists(bower_file_path):
        with open(bower_file_path, 'r') as bower_file:
            bower_data = json.load(bower_file)

    # check if package.json exists and load package.json data
    package_file_path = os.path.join(current_app.root_path, root, component, 'package.json')
    if os.path.exists(package_file_path):
        with open(package_file_path, 'r') as package_file:
            package_data = json.load(package_file)

    # check if specified file actually exists
    if not os.path.exists(os.path.join(current_app.root_path, root, component, filename)):
        return None

    # check if minified file exists (by pattern <filename>.min.<ext>)
    # returns filename if successful
    if current_app.config['BOWER_TRY_MINIFIED']:
        if '.min.' not in filename:
            minified_filename = '%s.min.%s' % tuple(filename.rsplit('.', 1))
            minified_path = os.path.join(root, component, minified_filename)
            if os.path.exists(os.path.join(current_app.root_path, minified_path)):
                filename = minified_filename

    # determine version of component and append as ?version= parameter to allow cache busting
    if current_app.config['BOWER_QUERYSTRING_REVVING']:
        if bower_data is not None and 'version' in bower_data:
            values['version'] = bower_data['version']
        elif package_data is not None and 'version' in package_data:
            values['version'] = package_data['version']
        else:
            # Fall back to the file's mtime when no manifest declares a version.
            values['version'] = os.path.getmtime(os.path.join(current_app.root_path, root, component, filename))

    return url_for('bower.serve', component=component, filename=filename, **values)
python
{ "resource": "" }
q42452
init_ixn
train
def init_ixn(api, logger, install_dir=None):
    """
    Create IXN object.

    :param api: tcl/python/rest
    :type api: trafficgenerator.tgn_utils.ApiType
    :param logger: logger object
    :param install_dir: IXN installation directory
    :return: IXN object
    """
    # Pick the wrapper implementation matching the requested API flavour.
    if api == ApiType.tcl:
        wrapper = IxnTclWrapper(logger, install_dir)
    elif api == ApiType.python:
        wrapper = IxnPythonWrapper(logger, install_dir)
    elif api == ApiType.rest:
        # The REST wrapper needs no local installation directory.
        wrapper = IxnRestWrapper(logger)
    else:
        raise TgnError('{} API not supported - use Tcl, python or REST'.format(api))
    return IxnApp(logger, wrapper)
python
{ "resource": "" }
q42453
IxnApp.disconnect
train
def disconnect(self):
    """ Disconnect from chassis and server. """
    # Only tear down the server session when a connection was actually
    # established (root.ref is populated on connect).
    if self.root.ref is not None:
        self.api.disconnect()
    # NOTE(review): assumes self.root is not None on entry; a second call
    # after disconnect would raise AttributeError on None.ref -- confirm
    # the intended call pattern.
    self.root = None
python
{ "resource": "" }
q42454
Timeout.start
train
def start(self):
    """Schedule the timeout.

    This is called on construction, so it should not be called explicitly,
    unless the timer has been canceled.
    """
    assert not self._timer, '%r is already started; to restart it, cancel it first' % self
    loop = evergreen.current.loop
    # The timeout fires by throwing into the task that started it.
    current = evergreen.current.task
    if self.seconds is None or self.seconds < 0:
        # "fake" timeout (never expires)
        self._timer = None
    elif self.exception is None or isinstance(self.exception, bool):
        # timeout that raises self
        self._timer = loop.call_later(self.seconds, self._timer_cb, current.throw, self)
    else:
        # regular timeout with user-provided exception
        self._timer = loop.call_later(self.seconds, self._timer_cb, current.throw, self.exception)
python
{ "resource": "" }
q42455
FqCreator.flush_read_tuple
train
def flush_read_tuple(self):
    """Flush the internal buffer of reads.

    Emits every buffered read of the current tuple as FASTQ records that
    share one RNF name, then resets the buffer.
    """
    if not self.is_empty():
        # Optional "[...]" suffix comment appended to the RNF name.
        suffix_comment_buffer = []
        if self._info_simulator is not None:
            suffix_comment_buffer.append(self._info_simulator)
        if self._info_reads_in_tuple:
            # todo: orientation (FF, FR, etc.)
            # orientation="".join([])
            suffix_comment_buffer.append("reads-in-tuple:{}".format(len(self.seqs_bases)))
        if len(suffix_comment_buffer) != 0:
            suffix_comment = "[{}]".format(",".join(suffix_comment_buffer))
        else:
            suffix_comment = ""

        rnf_name = self._rnf_profile.get_rnf_name(
            rnftools.rnfformat.ReadTuple(
                segments=self.segments,
                read_tuple_id=self.current_read_tuple_id,
                suffix=suffix_comment,
            )
        )
        # One 4-line FASTQ record per read; paired reads get "/1", "/2", ...
        # appended to the shared name.
        fq_reads = [
            os.linesep.join(
                [
                    "@{rnf_name}{read_suffix}".format(
                        rnf_name=rnf_name,
                        read_suffix="/{}".format(str(i + 1)) if len(self.seqs_bases) > 1 else "",
                    ),
                    self.seqs_bases[i],
                    "+",
                    self.seqs_qualities[i],
                ]
            ) for i in range(len(self.seqs_bases))
        ]
        self._fq_file.write(os.linesep.join(fq_reads))
        self._fq_file.write(os.linesep)
    # Reset the buffer for the next tuple (a no-op when already empty).
    self.empty()
python
{ "resource": "" }
q42456
BayesCategories.add_category
train
def add_category(self, name):
    """
    Adds a bayes category that we can later train

    :param name: name of the category
    :type name: str
    :return: the requested category
    :rtype: BayesCategory
    """
    # Register the new category under its name and hand it back so the
    # caller can train it immediately.
    new_category = BayesCategory(name)
    self.categories[name] = new_category
    return new_category
python
{ "resource": "" }
q42457
FilteredStream.line_is_interesting
train
def line_is_interesting(self, line):
    """Return True, False, or None.

    True means always output, False means never output, None means
    output only if there are interesting lines.
    """
    # Coverage-report boilerplate (header, separator, totals) is neutral:
    # shown only alongside interesting lines.
    if line.startswith(('Name', '--------', 'TOTAL')):
        return None
    # Fully-covered entries are never worth printing.
    if '100%' in line:
        return False
    if line == '\n':
        # A blank line is neutral only directly after a printable line.
        return None if self._last_line_was_printable else False
    return True
python
{ "resource": "" }
q42458
Graph.predecessors
train
def predecessors(self, node, exclude_compressed=True):
    """
    Returns the list of predecessors of a given node

    Parameters
    ----------
    node : str
        The target node

    exclude_compressed : boolean
        If true, compressed nodes are excluded from the predecessors list

    Returns
    -------
    list
        List of predecessors nodes
    """
    preds = super(Graph, self).predecessors(node)
    if not exclude_compressed:
        return preds
    # Keep only nodes that have not been marked as compressed.
    return [n for n in preds if not self.node[n].get('compressed', False)]
python
{ "resource": "" }
q42459
Graph.successors
train
def successors(self, node, exclude_compressed=True):
    """
    Returns the list of successors of a given node

    Parameters
    ----------
    node : str
        The target node

    exclude_compressed : boolean
        If true, compressed nodes are excluded from the successors list

    Returns
    -------
    list
        List of successors nodes
    """
    succs = super(Graph, self).successors(node)
    if not exclude_compressed:
        return succs
    # Keep only nodes that have not been marked as compressed.
    return [n for n in succs if not self.node[n].get('compressed', False)]
python
{ "resource": "" }
q42460
Graph.compress
train
def compress(self, setup):
    """
    Returns the compressed graph according to the given experimental setup

    Parameters
    ----------
    setup : :class:`caspo.core.setup.Setup`
        Experimental setup used to compress the graph

    Returns
    -------
    caspo.core.graph.Graph
        Compressed graph
    """
    # Nodes appearing in the setup must be preserved as-is.
    designated = set(setup.nodes)
    zipped = self.copy()

    # Candidates: non-designated nodes not yet merged away.
    marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)]
    while marked:
        for node, _ in sorted(marked):
            backward = zipped.predecessors(node)
            forward = zipped.successors(node)

            # A node with no predecessors, or exactly one predecessor that is
            # not also a successor, can be folded into its targets (and the
            # symmetric case into its sources); otherwise it must be kept.
            if not backward or (len(backward) == 1 and not backward[0] in forward):
                self.__merge_source_targets(node, zipped)

            elif not forward or (len(forward) == 1 and not forward[0] in backward):
                self.__merge_target_sources(node, zipped)

            else:
                designated.add(node)

        # Iterate to a fixpoint: merging may expose new candidates.
        marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)]

    not_compressed = [(n, d) for n, d in zipped.nodes(data=True) if not d.get('compressed', False)]
    return zipped.subgraph([n for n, _ in not_compressed])
python
{ "resource": "" }
q42461
RnfProfile.combine
train
def combine(self, *rnf_profiles):
    """Combine more profiles and set their maximal values.

    Widens this profile so it covers every given profile: each width
    attribute becomes the maximum over this profile and all arguments.

    Args:
        *rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile.
    """
    # Bug fix: the original signature omitted ``self`` while the body used
    # it, so every call raised NameError.
    for rnf_profile in rnf_profiles:
        self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width)
        self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width)
        self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width)
        self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width)
        self.coor_width = max(self.coor_width, rnf_profile.coor_width)
python
{ "resource": "" }
q42462
RnfProfile.load
train
def load(self, read_tuple_name):
    """Load RNF values from a read tuple name.

    Args:
        read_tuple_name (str): Read tuple name which the values are taken from.
    """
    self.prefix_width = 0
    self.read_tuple_id_width = 0
    self.genome_id_width = 0
    self.chr_id_width = 0
    self.coor_width = 0

    parts = read_tuple_name.split("__")
    self.prefix_width = len(parts[0])
    self.read_tuple_id_width = len(parts[1])
    segments = parts[2][1:-1].split("),(")
    for segment in segments:
        int_widths = list(map(len, segment.split(",")))
        self.genome_id_width = max(self.genome_id_width, int_widths[0])
        self.chr_id_width = max(self.chr_id_width, int_widths[1])
        # Bug fix: coordinates are fields 3 and 4 of a segment
        # (genome_id, chr_id, direction, left, right); the original read
        # fields 2 and 3 -- the direction flag and only the left
        # coordinate -- inconsistent with apply() and check().
        self.coor_width = max(self.coor_width, int_widths[3], int_widths[4])
python
{ "resource": "" }
q42463
RnfProfile.apply
train
def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True):
    """Apply profile on a read tuple name and update read tuple ID.

    Args:
        read_tuple_name (str): Read tuple name to be updated.
        read_tuple_id (id): New read tuple ID.
        synchronize_widths (bool): Update widths (in accordance to this profile).
    """
    parts = read_tuple_name.split("__")
    # Pad the prefix on the right so all names share the profile's width.
    parts[0] = self._fill_right(parts[0], "-", self.prefix_width)
    if read_tuple_id is not None:
        # Tuple ids are encoded in lower-case hexadecimal.
        parts[1] = "{:x}".format(read_tuple_id)
    # Left-pad the id with zeros to the profile's id width.
    # NOTE(review): this padding runs even when synchronize_widths is False;
    # confirm that only segment widths are meant to be optional.
    parts[1] = self._fill_left(parts[1], "0", self.read_tuple_id_width)
    if synchronize_widths:
        new_segments = []
        segments = parts[2][1:-1].split("),(")
        for segment in segments:
            # Segment fields: genome_id, chr_id, direction, left, right.
            values = segment.split(",")
            values[0] = values[0].zfill(self.genome_id_width)
            values[1] = values[1].zfill(self.chr_id_width)
            values[3] = values[3].zfill(self.coor_width)
            values[4] = values[4].zfill(self.coor_width)
            new_segments.append("(" + ",".join(values) + ")")
        parts[2] = ",".join(new_segments)
    return "__".join(parts)
python
{ "resource": "" }
q42464
RnfProfile.check
train
def check(self, read_tuple_name):
    """Check if the given read tuple name satisfies this profile.

    Args:
        read_tuple_name (str): Read tuple name.
    """
    parts = read_tuple_name.split("__")
    if len(parts[0]) != self.prefix_width:
        return False
    if len(parts[1]) != self.read_tuple_id_width:
        return False
    for segment in parts[2][1:-1].split("),("):
        # Segment fields: genome_id, chr_id, direction, left, right.
        widths = [len(field) for field in segment.split(",")]
        if (widths[0], widths[1]) != (self.genome_id_width, self.chr_id_width):
            return False
        if widths[3] != self.coor_width or widths[4] != self.coor_width:
            return False
    return True
python
{ "resource": "" }
q42465
Definition.get_column_definition_all
train
def get_column_definition_all(self, table):
    """Retrieve the column definition statement for all columns in a table."""
    # A table definition has one line per column, each starting with a
    # back-ticked column name; other lines (CREATE, keys, parens) are skipped.
    definitions = []
    for line in self.get_table_definition(table).split('\n'):
        if not line.strip().startswith('`'):
            continue
        # Drop the trailing comma/paren character, trim whitespace and put
        # a space after any remaining commas.
        definitions.append(line[0:-1].strip().replace(',', ', '))
    return definitions
python
{ "resource": "" }
q42466
Definition.get_column_definition
train
def get_column_definition(self, table, column):
    """Retrieve the column definition statement for a column from a table.

    Returns None when the column does not exist in the table.
    """
    # Match the exact backtick-quoted column name.  The previous bare
    # prefix test (col.strip('`').startswith(column)) wrongly returned
    # e.g. `username` when asked for `user`.
    target = '`%s`' % column
    for col in self.get_column_definition_all(table):
        if col.startswith(target):
            return col.strip(',')
python
{ "resource": "" }
q42467
original
train
def original(modname):
    """Return an unpatched version of the module named *modname*.

    The pristine module is re-imported once and cached in ``sys.modules``
    under a private key, so later calls are cheap.  It is not necessary to
    temporarily install unpatched versions of all patchable modules during
    the import, because none of them import each other, except for
    threading which imports thread.
    """
    cache_key = '__original_module_' + modname
    if cache_key in sys.modules:
        return sys.modules.get(cache_key)

    # Re-import the "pure" module while temporarily removing any patched
    # version currently occupying its name; restore whatever was there.
    saver = SysModulesSaver((modname,))
    sys.modules.pop(modname, None)
    try:
        pure = __import__(modname, {}, {}, modname.split('.')[:-1])
        # Keep a reference so the unpatched module doesn't get lost.
        sys.modules[cache_key] = pure
    finally:
        saver.restore()
    return sys.modules[cache_key]
python
{ "resource": "" }
q42468
patch
train
def patch(**on):
    """Globally patches certain system modules to be 'cooperative'.

    The keyword arguments afford some control over which modules are
    patched.  If no keyword arguments are supplied, all possible modules
    are patched.  If keywords are set to True, only the specified modules
    are patched.  E.g., ``patch(socket=True, select=True)`` patches only
    the select and socket modules.  Most arguments patch the single module
    of the same name (time, select).  The exception is socket, which also
    patches the ssl module if present.

    It's safe to call patch multiple times.
    """
    # Only these families can be patched; "all" is a meta-flag popped below.
    accepted_args = set(('select', 'socket', 'time'))
    default_on = on.pop("all", None)
    for k in on.keys():
        if k not in accepted_args:
            raise TypeError("patch() got an unexpected keyword argument %r" % k)
    # If nothing was explicitly enabled, default every family to on;
    # otherwise default the unspecified families to off.
    if default_on is None:
        default_on = not (True in list(on.values()))
    for modname in accepted_args:
        on.setdefault(modname, default_on)

    # Collect (module_name, patched_module) pairs; the already_patched
    # bookkeeping makes repeated calls idempotent per family.
    modules_to_patch = []
    if on['select'] and not already_patched.get('select'):
        modules_to_patch += _select_modules()
        already_patched['select'] = True
    if on['socket'] and not already_patched.get('socket'):
        modules_to_patch += _socket_modules()
        already_patched['socket'] = True
    if on['time'] and not already_patched.get('time'):
        modules_to_patch += _time_modules()
        already_patched['time'] = True

    # Hold the import lock while mutating modules so concurrent imports
    # cannot observe a half-patched module.
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            # Copy every attribute listed in __patched__ onto the real module.
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
    finally:
        imp.release_lock()
python
{ "resource": "" }
q42469
SysModulesSaver.save
train
def save(self, *module_names):
    """Record the current sys.modules entry (or None if absent) for each
    of the named modules."""
    for name in module_names:
        self._saved[name] = sys.modules.get(name, None)
python
{ "resource": "" }
q42470
SysModulesSaver.restore
train
def restore(self):
    """Restore the saved entries back into sys.modules.

    Entries saved as None are removed from sys.modules.  The import lock
    is always released on exit (it is presumably acquired when the saver
    is constructed — the pairing lives outside this method).
    """
    try:
        for name, module in self._saved.items():
            if module is None:
                # Module was absent at save time: make it absent again.
                sys.modules.pop(name, None)
            else:
                sys.modules[name] = module
    finally:
        imp.release_lock()
python
{ "resource": "" }
q42471
LIVVDict.nested_insert
train
def nested_insert(self, item_list):
    """Create a chain of nested LIVVDicts following the keys in ``item_list``.

    The final key always gets a fresh, empty LIVVDict (overwriting any
    existing value); intermediate keys are only created when missing.
    """
    if not item_list:
        return
    head, tail = item_list[0], item_list[1:]
    if tail:
        if head not in self:
            self[head] = LIVVDict()
        self[head].nested_insert(tail)
    else:
        self[head] = LIVVDict()
python
{ "resource": "" }
q42472
LIVVDict.nested_assign
train
def nested_assign(self, key_list, value):
    """Assign ``value`` at the nested path described by ``key_list``.

    Intermediate LIVVDicts are created on demand; the last key receives
    ``value`` directly.
    """
    if not key_list:
        return
    head, tail = key_list[0], key_list[1:]
    if tail:
        if head not in self:
            self[head] = LIVVDict()
        self[head].nested_assign(tail, value)
    else:
        self[head] = value
python
{ "resource": "" }
q42473
post_save
train
def post_save(sender, instance, created, **kwargs):
    """
    After save, create order item instances for orderable models.

    Only models configured as orderable (per settings) receive order
    items, and only when the instance has none yet.
    """
    model_label = '%s.%s' % (sender._meta.app_label, sender._meta.object_name)
    labels = resolve_labels(model_label)

    # Bail out for models that are not configured as orderable.
    order_field_names = is_orderable(model_label)
    if not order_field_names:
        return

    related_set_name = resolve_order_item_related_set_name(labels)
    orderitem_set = getattr(instance, related_set_name)
    if orderitem_set.all():
        return

    # Seed every order field at position 1 and normalise afterwards.
    initial_orders = dict((name, 1) for name in order_field_names)
    orderitem_set.model.objects.create(item=instance, **initial_orders)
    sanitize_order(orderitem_set.model)
python
{ "resource": "" }
q42474
parse_children
train
def parse_children(parent):
    """Recursively collect parsed components from a tag's children.

    Each child is first offered to parse_tag; children that do not match
    but have contents are descended into.
    """
    components = []
    for child in parent.children:
        parsed = parse_tag(child)
        if parsed:
            components.append(parsed)
        elif hasattr(child, 'contents'):
            components.extend(parse_children(child))
    return components
python
{ "resource": "" }
q42475
DataURLFile.save
train
def save(self, path):
    """
    Writes file to a particular location

    This won't work for cloud environments like Google's App Engine, use
    with caution ensure to catch exceptions so you can provide informed
    feedback.

    prestans does not mask File IO exceptions so your handler can respond
    better.
    """
    # Use a context manager so the handle is closed even if write() raises;
    # the previous explicit open/close leaked the handle on failure.
    with open(path, 'wb') as file_handle:
        file_handle.write(self._file_contents)
python
{ "resource": "" }
q42476
GradeChangeLogAPI.query_by_assignment
train
def query_by_assignment(self, assignment_id, end_time=None, start_time=None):
    """
    Query by assignment.

    List grade change events for a given assignment.
    """
    path = {"assignment_id": assignment_id}
    data = {}
    params = {}

    # Optional time-range bounds are only sent when provided.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time

    self.logger.debug("GET /api/v1/audit/grade_change/assignments/{assignment_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/grade_change/assignments/{assignment_id}".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42477
GradeChangeLogAPI.query_by_student
train
def query_by_student(self, student_id, end_time=None, start_time=None):
    """
    Query by student.

    List grade change events for a given student.
    """
    path = {"student_id": student_id}
    data = {}
    params = {}

    # Optional time-range bounds are only sent when provided.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time

    self.logger.debug("GET /api/v1/audit/grade_change/students/{student_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/grade_change/students/{student_id}".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42478
GradeChangeLogAPI.query_by_grader
train
def query_by_grader(self, grader_id, end_time=None, start_time=None):
    """
    Query by grader.

    List grade change events for a given grader.
    """
    path = {"grader_id": grader_id}
    data = {}
    params = {}

    # Optional time-range bounds are only sent when provided.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time

    self.logger.debug("GET /api/v1/audit/grade_change/graders/{grader_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/grade_change/graders/{grader_id}".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42479
Response.register_serializers
train
def register_serializers(self, serializers):
    """
    Adds extra serializers; generally registered during the handler lifecycle

    Raises TypeError if any entry does not subclass prestans.serializer.Base.
    """
    for candidate in serializers:
        if isinstance(candidate, serializer.Base):
            continue
        raise TypeError(
            "registered serializer %s.%s does not inherit from prestans.serializer.Serializer" % (
                candidate.__module__, candidate.__class__.__name__))

    # Concatenate rather than extend, matching the original rebinding.
    self._serializers = self._serializers + serializers
python
{ "resource": "" }
q42480
ContributorReport.is_valid
train
def is_valid(self):
    """returns `True` if the report should be sent."""
    has_activity = bool(self.total)
    is_freelance = bool(self.contributor.freelanceprofile.is_freelance)
    # A report is only worth sending for freelancers with a non-zero total.
    return has_activity and is_freelance
python
{ "resource": "" }
q42481
ContributorReport.contributions
train
def contributions(self):
    """Apply a datetime filter against the contributor's contribution queryset.

    The filtered queryset is computed once and cached on the instance.
    """
    if self._contributions is None:
        queryset = self.contributor.contributions
        self._contributions = queryset.filter(
            content__published__gte=self.start,
            content__published__lt=self.end,
        )
    return self._contributions
python
{ "resource": "" }
q42482
ContributorReport.line_items
train
def line_items(self):
    """Apply a datetime filter against the contributor's line item queryset.

    The filtered queryset is computed once and cached on the instance.
    """
    if self._line_items is not None:
        return self._line_items
    payment_window = (self.start, self.end)
    self._line_items = self.contributor.line_items.filter(payment_date__range=payment_window)
    return self._line_items
python
{ "resource": "" }
q42483
ContributorReport.deadline
train
def deadline(self):
    """Return the configured deadline, defaulting to one day after ``now``."""
    if self._deadline:
        return self._deadline
    # No deadline provided: fall back to tomorrow and cache it.
    self._deadline = self.now + timezone.timedelta(days=1)
    return self._deadline
python
{ "resource": "" }
q42484
EmailReport.send_contributor_email
train
def send_contributor_email(self, contributor):
    """Build and send the report email for a single contributor."""
    report = ContributorReport(
        contributor,
        month=self.month,
        year=self.year,
        deadline=self._deadline,
        start=self._start,
        end=self._end,
    )
    report.send()
python
{ "resource": "" }
q42485
EmailReport.send_mass_contributor_emails
train
def send_mass_contributor_emails(self):
    """Send report email to all relevant contributors.

    Contributors whose email is listed under EMAIL_SETTINGS["EXCLUDED"]
    are skipped.  (If the report configuration is not active we only
    send to the debugging user.)
    """
    for contributor in self.contributors:
        if contributor.email in EMAIL_SETTINGS.get("EXCLUDED", []):
            continue
        self.send_contributor_email(contributor)
python
{ "resource": "" }
q42486
EmailReport.contributors
train
def contributors(self):
    """Property to retrieve or access the list of contributors.

    The list is computed via get_contributors() on first access and
    cached thereafter.
    """
    if self._contributors:
        return self._contributors
    self._contributors = self.get_contributors()
    return self._contributors
python
{ "resource": "" }
q42487
CrawlElement.xpath_pick_one
train
def xpath_pick_one(self, xpaths):
    """
    Try each of the xpaths successively until a single element is found.
    If no xpath succeeds then raise the last UnexpectedContentException
    caught.
    """
    for index, xpath in enumerate(xpaths):
        try:
            # [1, 1] requests exactly one match; take that element.
            return self.xpath(xpath, [1, 1])[0]
        except UnexpectedContentException:
            # Re-raise only when no fallback xpath remains.
            if index == len(xpaths) - 1:
                raise
python
{ "resource": "" }
q42488
_PZoneOperationSerializer.get_content_title
train
def get_content_title(self, obj):
    """Get content's title (fetched fresh from the Content model)."""
    content = Content.objects.get(id=obj.content.id)
    return content.title
python
{ "resource": "" }
q42489
Dates.is_date
train
def is_date(self):
    """Determine if a data record is of type DATE.

    On success, sets self.type to 'DATE', clears self.len and returns
    True; otherwise returns None (implicitly, as before).
    """
    dt = DATA_TYPES['date']
    text = str(self.data)
    # Require the native date type and exactly two '-' separators.
    if type(self.data) is not dt['type'] or text.count('-') != 2:
        return
    year, month, day = text.split('-')
    if int(year) in YEARS and int(month) in MONTHS and int(day) in DAYS:
        self.type = 'date'.upper()
        self.len = None
        return True
python
{ "resource": "" }
q42490
Dates.is_time
train
def is_time(self):
    """Determine if a data record is of type TIME.

    On success, sets self.type to 'TIME', clears self.len and returns
    True; otherwise returns None (implicitly, as before).
    """
    dt = DATA_TYPES['time']
    text = str(self.data)
    # Require the native time type and exactly two ':' separators.
    if type(self.data) is not dt['type'] or text.count(':') != 2:
        return
    hour, minute, second = text.split(':')
    # Seconds may carry a fractional part, hence the float round-trip.
    if int(hour) in HOURS and int(minute) in MINUTES and int(float(second)) in SECONDS:
        self.type = 'time'.upper()
        self.len = None
        return True
python
{ "resource": "" }
q42491
Dates.is_year
train
def is_year(self):
    """Determine if a data record is of type YEAR.

    On success, sets self.type to 'YEAR', clears self.len and returns
    True; otherwise returns None (implicitly, as before).
    """
    dt = DATA_TYPES['year']
    # Only applicable when the type declares truthy min/max bounds.
    if not (dt['min'] and dt['max']):
        return
    if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']:
        self.type = 'year'.upper()
        self.len = None
        return True
python
{ "resource": "" }
q42492
Dates._is_date_data
train
def _is_date_data(self, data_type):
    """Private helper: flag the record as ``data_type`` when its Python
    type matches the DATA_TYPES entry; returns True on match, else None."""
    dt = DATA_TYPES[data_type]
    if not isinstance(self.data, dt['type']):
        return
    self.type = data_type.upper()
    self.len = None
    return True
python
{ "resource": "" }
q42493
Barrier.wait
train
def wait(self, timeout=None):
    """Wait for the barrier.

    When the specified number of threads have started waiting, they are
    all simultaneously awoken.  If an 'action' was provided for the
    barrier, one of the threads will have executed that callback prior
    to returning.

    Args:
        timeout: Seconds to wait before giving up; defaults to the
            timeout the barrier was created with.

    Returns:
        An individual index number from 0 to 'parties-1'.
    """
    # Fall back to the barrier's default timeout when none is given.
    if timeout is None:
        timeout = self._timeout
    with self._cond:
        self._enter() # Block while the barrier drains.
        # Claim the next arrival slot; _count tracks threads inside wait().
        index = self._count
        self._count += 1
        try:
            if index + 1 == self._parties:
                # We are the last arrival: we release the barrier.
                self._release()
            else:
                # We wait until someone releases us
                self._wait(timeout)
            return index
        finally:
            self._count -= 1
            # Wake up any threads waiting for barrier to drain.
            self._exit()
python
{ "resource": "" }
q42494
Barrier.reset
train
def reset(self):
    """Reset the barrier to the initial state.

    Any threads currently waiting will get the BrokenBarrier exception
    raised.
    """
    # NOTE(review): state codes appear to be 0 = filling, -1 = resetting
    # (clears when the last waiting thread exits), -2 = broken — inferred
    # from the original comments; confirm against the class definition.
    with self._cond:
        if self._count > 0:
            # Threads are still inside wait(): route them through reset.
            if self._state == 0:
                #reset the barrier, waking up threads
                self._state = -1
            elif self._state == -2:
                #was broken, set it to reset state
                #which clears when the last thread exits
                self._state = -1
        else:
            # Nobody is waiting: simply return to the initial state.
            self._state = 0
        self._cond.notify_all()
python
{ "resource": "" }
q42495
BaseContentDetailView.get
train
def get(self, request, *args, **kwargs):
    """Serve the content detail page, handling canonical-URL redirects and
    publish state.

    Overrides the default get function to use a token if there is one to
    retrieve the object.  If a subclass uses its own GET implementation,
    token_from_kwargs should be called if that detail view should be
    accessible via token.
    """
    self.object = self.get_object()
    # Anonymous access can be granted by the URLconf via kwargs.
    allow_anonymous = kwargs.get("allow_anonymous", False)

    # We only want to redirect if that setting is true, and we're not
    # allowing anonymous users...
    if self.redirect_correct_path and not allow_anonymous:
        # ...and obviously only if the requested URL is not canonical.
        if self.request.path != self.object.get_absolute_url():
            return HttpResponsePermanentRedirect(self.object.get_absolute_url())

    context = self.get_context_data(object=self.object)
    response = self.render_to_response(context)

    # Unpublished article: no publish date, or publish date in the future.
    if self.object.published is None or self.object.published > timezone.now():

        # Users without staff permission are bounced to login or 404.
        if not request.user.is_staff and not allow_anonymous:
            response = redirect_unpublished_to_login_or_404(
                request=request,
                next_url=self.object.get_absolute_url(),
                next_params=request.GET)

        # Never cache unpublished articles
        add_never_cache_headers(response)
    else:
        # Published content is cacheable; vary on encoding only.
        response["Vary"] = "Accept-Encoding"
    return response
python
{ "resource": "" }
q42496
host
train
def host(value):
    """Validate that the value is a ``<host>:<port>`` network location.

    Args:
        value: Candidate address string.  Falsy values (empty string,
            None) are treated as "not provided" and accepted.

    Returns:
        Tuple of (bool, str): validity flag and an error message
        (empty string when valid).
    """
    if not value:
        return (True, "")
    try:
        # Renamed from `host`, which shadowed the function itself.
        _host, port = value.split(":")
    except ValueError:
        # Either no colon at all or more than one (e.g. IPv6 literals,
        # which this validator does not support).
        return (False, "value needs to be <host>:<port>")
    try:
        int(port)
    except ValueError:
        return (False, "port component of the host address needs to be a number")
    return (True, "")
python
{ "resource": "" }
q42497
QuizReportsAPI.retrieve_all_quiz_reports
train
def retrieve_all_quiz_reports(self, quiz_id, course_id, includes_all_versions=None):
    """
    Retrieve all quiz reports.

    Returns a list of all available reports.
    """
    path = {"course_id": course_id, "quiz_id": quiz_id}
    data = {}
    params = {}

    # Whether to consider all submissions or only the most recent;
    # omitted from the query when not provided.
    if includes_all_versions is not None:
        params["includes_all_versions"] = includes_all_versions

    self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42498
Setup.clampings_iter
train
def clampings_iter(self, cues=None):
    """
    Iterates over all possible clampings of this experimental setup

    Parameters
    ----------
    cues : Optional[iterable]
        If given, restricts clampings over given species names

    Yields
    ------
    caspo.core.clamping.Clamping
        The next clamping with respect to the experimental setup
    """
    species = cues or list(self.stimuli + self.inhibitors)
    # All subsets of the cue species, from the empty set up to all of them.
    clampings = it.chain.from_iterable(it.combinations(species, r) for r in xrange(len(species) + 1))

    # Template: every stimulus defaults to clamped off (-1).
    base_literals = {}
    for stimulus in self.stimuli:
        base_literals[stimulus] = -1

    for clamping in clampings:
        literals = base_literals.copy()
        # NOTE: the loop variable was previously named `cues`, silently
        # shadowing (and clobbering) the parameter of the same name.
        for cue in clamping:
            if cue in self.stimuli:
                literals[cue] = 1
            else:
                literals[cue] = -1
        yield Clamping(literals.iteritems())
python
{ "resource": "" }
q42499
Setup.to_funset
train
def to_funset(self):
    """
    Converts the experimental setup to a set of `gringo.Fun`_ object
    instances

    Returns
    -------
    set
        The set of `gringo.Fun`_ object instances

    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    funs = set()
    for var in self.stimuli:
        funs.add(gringo.Fun('stimulus', [str(var)]))
    for var in self.inhibitors:
        funs.add(gringo.Fun('inhibitor', [str(var)]))
    for var in self.readouts:
        funs.add(gringo.Fun('readout', [str(var)]))
    return funs
python
{ "resource": "" }