_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q42500
Setup.from_json
train
def from_json(cls, filename):
    """Build an experimental setup from a JSON file.

    Parameters
    ----------
    filename : str
        Absolute path to the JSON file to read

    Returns
    -------
    caspo.core.setup.Setup
        Newly created setup instance
    """
    with open(filename) as fp:
        contents = json.load(fp)
    stimuli = contents['stimuli']
    inhibitors = contents['inhibitors']
    readouts = contents['readouts']
    return cls(stimuli, inhibitors, readouts)
python
{ "resource": "" }
q42501
Setup.to_json
train
def to_json(self, filename):
    """Write the experimental setup to a JSON file.

    Parameters
    ----------
    filename : str
        Absolute path where to write the JSON file
    """
    payload = {
        'stimuli': self.stimuli,
        'inhibitors': self.inhibitors,
        'readouts': self.readouts,
    }
    with open(filename, 'w') as fp:
        json.dump(payload, fp)
python
{ "resource": "" }
q42502
Setup.filter
train
def filter(self, networks):
    """Restrict this experimental setup to species present in the given networks.

    Parameters
    ----------
    networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
        List of logical networks

    Returns
    -------
    caspo.core.setup.Setup
        The restricted experimental setup
    """
    cues = self.stimuli + self.inhibitors
    seen_cues = set()
    seen_readouts = set()
    # Collect every cue literal and readout variable that actually appears
    # in at least one network mapping.
    for clause, var in networks.mappings:
        for literal, _ in clause:
            if literal in cues:
                seen_cues.add(literal)
        if var in self.readouts:
            seen_readouts.add(var)
    return Setup(seen_cues.intersection(self.stimuli),
                 seen_cues.intersection(self.inhibitors),
                 seen_readouts)
python
{ "resource": "" }
q42503
Setup.cues
train
def cues(self, rename_inhibitors=False):
    """Return stimuli and inhibitor species of this experimental setup.

    Parameters
    ----------
    rename_inhibitors : boolean
        If True, rename inhibitors with an ending 'i' as in MIDAS files.

    Returns
    -------
    list
        Species names in order: first stimuli, then inhibitors
    """
    inhibitors = self.inhibitors
    if rename_inhibitors:
        # MIDAS convention: inhibitor columns carry an 'i' suffix.
        inhibitors = [name + 'i' for name in inhibitors]
    return self.stimuli + inhibitors
python
{ "resource": "" }
q42504
SkipList.insert
train
def insert(self, key, value):
    """Insert a key-value pair in the list.

    The pair is inserted at the correct location so that the list
    remains sorted on *key*. If a pair with the same key is already in
    the list, then the pair is appended after all other pairs with that
    key.
    """
    # Position the internal search path at the last node with key <= *key*,
    # so equal keys keep their insertion order.
    self._find_lte(key)
    node = self._create_node(key, value)
    self._insert(node)
python
{ "resource": "" }
q42505
SkipList.clear
train
def clear(self):
    """Remove all key-value pairs."""
    # Point every head forward-link (slots 2..2+maxlevel-1) straight at the
    # tail, emptying the list at every level.
    for i in range(self.maxlevel):
        self._head[2+i] = self._tail
    # NOTE(review): the tail's last slot appears to track the pair count;
    # confirm against _insert/_remove before relying on this.
    self._tail[-1] = 0
    self._level = 1
python
{ "resource": "" }
q42506
SkipList.items
train
def items(self, start=None, stop=None):
    """Return an iterator yielding pairs.

    If *start* is specified, iteration starts at the first pair with a
    key that is larger than or equal to *start*. If not specified,
    iteration starts at the first pair in the list. If *stop* is
    specified, iteration stops at the last pair that is smaller than
    *stop*. If not specified, iteration ends with the last pair in the
    list.
    """
    if start is None:
        # Slot 2 of the head is the level-0 forward link: the first data node.
        node = self._head[2]
    else:
        # Position the search path just before *start*, then step to its
        # successor at level 0.
        self._find_lt(start)
        node = self._path[0][2]
    # Walk level-0 links until the tail (or a key >= *stop*) is reached.
    while node is not self._tail and (stop is None or node[0] < stop):
        yield (node[0], node[1])
        node = node[2]
python
{ "resource": "" }
q42507
SkipList.popitem
train
def popitem(self):
    """Removes the first key-value pair and return it.

    This method raises a ``KeyError`` if the list is empty.
    """
    node = self._head[2]
    if node is self._tail:
        raise KeyError('list is empty')
    # Rebuild the search path up to the first node so _remove can unlink it
    # at every level.
    self._find_lt(node[0])
    self._remove(node)
    return (node[0], node[1])
python
{ "resource": "" }
q42508
TabsAPI.update_tab_for_course
train
def update_tab_for_course(self, tab_id, course_id, hidden=None, position=None):
    """
    Update a tab for a course.

    Home and Settings tabs are not manageable, and can't be hidden or moved.

    Returns a tab object.
    """
    # REQUIRED path parameters.
    path = {"course_id": course_id, "tab_id": tab_id}
    data = {}
    params = {}

    # OPTIONAL - position: the new position of the tab, 1-based.
    if position is not None:
        data["position"] = position
    # OPTIONAL - hidden.
    if hidden is not None:
        data["hidden"] = hidden

    self.logger.debug("PUT /api/v1/courses/{course_id}/tabs/{tab_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/tabs/{tab_id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42509
Select.select_all
train
def select_all(self, table, limit=MAX_ROWS_PER_QUERY, execute=True):
    """Query all rows and columns from a table.

    Falls back to batched retrieval when the table holds more rows than a
    single query is allowed to return.

    :param table: Name of the table
    :param limit: Maximum rows per individual query
    :param execute: When False, return the SQL statement(s) instead of rows
    :return: All rows, or the statement(s) when execute is False
    """
    total = self.count_rows(table)
    if total <= limit:
        return self.select(table, '*', execute=execute)
    return self._select_batched(table, '*', total, limit, execute=execute)
python
{ "resource": "" }
q42510
Select.select_distinct
train
def select_distinct(self, table, cols='*', execute=True):
    """Query distinct values from a table.

    :param table: Name of the table
    :param cols: Columns to select, defaults to all
    :param execute: When False, return the SQL statement instead of rows
    :return: Distinct rows (or the statement when execute is False)
    """
    # Delegates to select() with the SELECT DISTINCT query type.
    return self.select(table, cols, execute, select_type='SELECT DISTINCT')
python
{ "resource": "" }
q42511
Select.select
train
def select(self, table, cols, execute=True, select_type='SELECT', return_type=list):
    """Query every row and only certain columns from a table.

    :param table: Name of the table
    :param cols: Columns to select ('*' for all)
    :param execute: When False, return the SQL statement instead of running it
    :param select_type: 'SELECT' or another allowed query type
    :param return_type: Container type for the returned rows
    :return: Queried rows, or the SQL string when execute is False
    """
    # Validate the query type against the allowed set.
    select_type = select_type.upper()
    assert select_type in SELECT_QUERY_TYPES

    statement = '%s %s FROM %s' % (select_type, join_cols(cols), wrap(table))
    if not execute:
        # Caller only wants the SQL text.
        return statement
    values = self.fetch(statement)
    return self._return_rows(table, cols, values, return_type)
python
{ "resource": "" }
q42512
Select.select_limit
train
def select_limit(self, table, cols='*', offset=0, limit=MAX_ROWS_PER_QUERY):
    """Run a select query with an offset and limit parameter.

    :param table: Name of the table
    :param cols: Columns to select, defaults to all
    :param offset: Number of leading rows to skip
    :param limit: Maximum number of rows to return
    :return: Fetched rows
    """
    statement = self._select_limit_statement(table, cols, offset, limit)
    return self.fetch(statement)
python
{ "resource": "" }
q42513
Select.select_where
train
def select_where(self, table, cols, where, return_type=list):
    """
    Query certain rows from a table where a particular value is found.

    cols can be passed as an iterable (list, set, tuple) or a string when
    querying a single column. where is a two or three part tuple; with
    two parts the operator defaults to equals (=). Passing a list or set
    of such tuples joins the clauses with AND.

    :param table: Name of table
    :param cols: List, tuple or set of columns or string with single column name
    :param where: WHERE clause tuple(s)
        two-part: (where_column, where_value)
        three-part: (where_column, comparison_operator, where_value)
    :param return_type: Type, type to return values in
    :return: Queried rows
    """
    if isinstance(where, (list, set)):
        # Multiple WHERE clauses, joined with AND.
        where_statement = ' AND '.join(self._where_clause(clause) for clause in where)
    else:
        where_statement = self._where_clause(where)

    statement = "SELECT {0} FROM {1} WHERE {2}".format(join_cols(cols), wrap(table), where_statement)
    return self._return_rows(table, cols, self.fetch(statement), return_type)
python
{ "resource": "" }
q42514
Select.select_where_between
train
def select_where_between(self, table, cols, where_col, between):
    """
    Query rows from a table where a column's value lies between two values.

    :param table: Name of the table
    :param cols: List, tuple or set of columns or string with single column name
    :param where_col: Column to check values against
    :param between: Tuple with min and max values for comparison
    :return: Queried rows
    """
    low, high = between
    statement = "SELECT {0} FROM {1} WHERE {2} BETWEEN {3} AND {4}".format(
        join_cols(cols), wrap(table), where_col, low, high)
    return self.fetch(statement)
python
{ "resource": "" }
q42515
Select.select_where_like
train
def select_where_like(self, table, cols, where_col, start=None, end=None, anywhere=None, index=(None, None), length=None):
    """
    Query rows from a table where a specific pattern is found in a column.

    MySQL wildcards: '%' matches any run of characters, '_' exactly one.

    :param table: Name of the table
    :param cols: List, tuple or set of columns or string with single column name
    :param where_col: Column to check pattern against
    :param start: Value to be found at the start
    :param end: Value to be found at the end
    :param anywhere: Value to be found anywhere
    :param index: Value to be found at a certain index
    :param length: Minimum character length
    :return: Queried rows
    """
    # Build the LIKE pattern from whichever keyword arguments were given.
    like_pattern = self._like_pattern(start, end, anywhere, index, length)
    statement = "SELECT {0} FROM {1} WHERE {2} LIKE '{3}'".format(
        join_cols(cols), wrap(table), where_col, like_pattern)
    return self.fetch(statement)
python
{ "resource": "" }
q42516
Select._where_clause
train
def _where_clause(where):
    """
    Unpack a where clause tuple and concatenate a MySQL WHERE statement.

    :param where: 2 or 3 part tuple containing a where_column and a
        where_value (optional operator in the middle)
    :return: WHERE clause statement, e.g. first_name='John'
    """
    assert isinstance(where, tuple)
    if len(where) == 3:
        column, operator, value = where
    else:
        # Two-part clause: the operator defaults to equality.
        column, value = where
        operator = '='
    assert operator in SELECT_WHERE_OPERATORS
    return "%s%s'%s'" % (column, operator, value)
python
{ "resource": "" }
q42517
Select._return_rows
train
def _return_rows(self, table, cols, values, return_type): """Return fetched rows in the desired type.""" if return_type is dict: # Pack each row into a dictionary cols = self.get_columns(table) if cols is '*' else cols if len(values) > 0 and isinstance(values[0], (set, list, tuple)): return [dict(zip(cols, row)) for row in values] else: return dict(zip(cols, values)) elif return_type is tuple: return [tuple(row) for row in values] else: return values
python
{ "resource": "" }
q42518
Select._select_batched
train
def _select_batched(self, table, cols, num_rows, limit, queries_per_batch=3, execute=True): """Run select queries in small batches and return joined resutls.""" # Execute select queries in small batches to avoid connection timeout commands, offset = [], 0 while num_rows > 0: # Use number of rows as limit if num_rows < limit _limit = min(limit, num_rows) # Execute select_limit query commands.append(self._select_limit_statement(table, cols=cols, offset=offset, limit=limit)) offset += _limit num_rows += -_limit # Execute commands if execute: rows = [] til_reconnect = queries_per_batch for c in commands: if til_reconnect == 0: self.disconnect() self.reconnect() til_reconnect = queries_per_batch rows.extend(self.fetch(c, False)) til_reconnect += -1 del commands return rows # Return commands else: return commands
python
{ "resource": "" }
q42519
Select._select_limit_statement
train
def _select_limit_statement(table, cols='*', offset=0, limit=MAX_ROWS_PER_QUERY):
    """Concatenate a select with offset and limit statement.

    :param table: Name of the table
    :param cols: Columns to select, defaults to all
    :param offset: Number of leading rows to skip
    :param limit: Maximum number of rows to return
    :return: SELECT ... LIMIT offset, row_count statement string
    """
    return 'SELECT %s FROM %s LIMIT %s, %s' % (join_cols(cols), wrap(table), offset, limit)
python
{ "resource": "" }
q42520
Select._like_pattern
train
def _like_pattern(start, end, anywhere, index, length): """ Create a LIKE pattern to use as a search parameter for a WHERE clause. :param start: Value to be found at the start :param end: Value to be found at the end :param anywhere: Value to be found anywhere :param index: Value to be found at a certain index :param length: Minimum character length :return: WHERE pattern """ # Unpack index tuple index_num, index_char = index index = None # Start, end, anywhere if all(i for i in [start, end, anywhere]) and not any(i for i in [index, length]): return '{start}%{anywhere}%{end}'.format(start=start, end=end, anywhere=anywhere) # Start, end elif all(i for i in [start, end]) and not any(i for i in [anywhere, index, length]): return '{start}%{end}'.format(start=start, end=end) # Start, anywhere elif all(i for i in [start, anywhere]) and not any(i for i in [end, index, length]): return '{start}%{anywhere}%'.format(start=start, anywhere=anywhere) # End, anywhere elif all(i for i in [end, anywhere]) and not any(i for i in [start, index, length]): return '%{anywhere}%{end}'.format(end=end, anywhere=anywhere) # Start elif start and not any(i for i in [end, anywhere, index, length]): return '{start}%'.format(start=start) # End elif end and not any(i for i in [start, anywhere, index, length]): return '%{end}'.format(end=end) # Anywhere elif anywhere and not any(i for i in [start, end, index, length]): return '%{anywhere}%'.format(anywhere=anywhere) # Index elif index_num and index_char and not any(i for i in [start, end, anywhere, length]): return '{index_num}{index_char}%'.format(index_num='_' * (index_num + 1), index_char=index_char) # Length elif length and not any(i for i in [start, end, anywhere, index]): return '{length}'.format(length='_%' * length) else: return None
python
{ "resource": "" }
q42521
insert_statement
train
def insert_statement(table, columns, values):
    """Generate an insert statement string for dumping to text file or MySQL execution.

    :param table: Target table name
    :param columns: Iterable of column names
    :param values: Sequence of rows; scalar rows are wrapped into one-column rows
    :return: Multi-row INSERT INTO statement string
    """
    # Normalize scalar rows into single-column rows.
    if not all(isinstance(r, (list, set, tuple)) for r in values):
        values = [[r] for r in values]

    # PERF FIX: reuse one converter instance instead of constructing a new
    # MySQLConverterBase for every single cell.
    converter = MySQLConverterBase()

    rows = []
    for row in values:
        new_row = []
        for col in row:
            if col is None:
                new_col = 'NULL'
            elif isinstance(col, (int, float, Decimal)):
                new_col = str(converter.to_mysql(col))
            else:
                string = str(converter.to_mysql(col))
                # Pick a quote character that does not collide with the value.
                if "'" in string:
                    new_col = '"' + string + '"'
                else:
                    new_col = "'" + string + "'"
            new_row.append(new_col)
        rows.append(', '.join(new_row))

    vals = '(' + '),\n\t('.join(rows) + ')'
    statement = "INSERT INTO\n\t{0} ({1}) \nVALUES\n\t{2}".format(wrap(table), cols_str(columns), vals)
    return statement
python
{ "resource": "" }
q42522
Export.dump_table
train
def dump_table(self, table, drop_statement=True):
    """Export a table structure and data to SQL file for backup or later import.

    :param table: Name of the table to dump
    :param drop_statement: Prepend a DROP TABLE IF EXISTS statement
    :return: SQL text containing the structure and data dump
    """
    create_statement = self.get_table_definition(table)
    data = self.select_all(table)

    # Header comment block.
    statements = ['\n',
                  sql_file_comment(''),
                  sql_file_comment('Table structure and data dump for {0}'.format(table)),
                  sql_file_comment('')]
    if drop_statement:
        statements.append('\nDROP TABLE IF EXISTS {0};'.format(wrap(table)))
    statements.append('{0};\n'.format(create_statement))
    # Only emit an INSERT when the table actually holds rows.
    if len(data) > 0:
        statements.append('{0};'.format(insert_statement(table, self.get_columns(table), data)))
    return '\n'.join(statements)
python
{ "resource": "" }
q42523
Export.dump_database
train
def dump_database(self, file_path, database=None, tables=None):
    """
    Export the table structure and data for tables in a database.

    If no database is specified, the currently connected database is used
    as the source. If no tables are provided, all tables are dumped.

    :param file_path: Destination path; '.sql' is appended when missing
    :param database: Optional database to switch to before dumping
    :param tables: Optional subset of tables to dump
    :return: Path of the written SQL file
    """
    # Change database if needed.
    if database:
        self.change_db(database)
    # Default to every table in the database.
    if not tables:
        tables = self.tables

    # Retrieve and join the per-table dump statements, bracketed by
    # foreign-key-check toggles so the dump can be re-imported in any order.
    statements = [self.dump_table(table) for table in tqdm(tables, total=len(tables), desc='Generating dump files')]
    dump = 'SET FOREIGN_KEY_CHECKS=0;' + '\n'.join(statements) + '\nSET FOREIGN_KEY_CHECKS=1;'

    if not file_path.endswith('.sql'):
        file_path += '.sql'
    write_text(dump, file_path)
    return file_path
python
{ "resource": "" }
q42524
retry
train
def retry(method):
    """
    Allows to retry method execution few times.

    Retries on HasOffers rate-limit errors (after sleeping retry_timeout)
    and on connection errors (after recreating the session); raises
    MaxRetriesExceeded when all attempts are exhausted.
    """
    def inner(self, *args, **kwargs):
        attempt_number = 1
        # NOTE(review): with self.retries == N this strict `<` performs only
        # N-1 attempts - confirm that is the intended semantics.
        while attempt_number < self.retries:
            try:
                return method(self, *args, **kwargs)
            except HasOffersException as exc:
                # Only rate-limit errors are retried; anything else propagates.
                if 'API usage exceeded rate limit' not in str(exc):
                    raise exc
                self.logger.debug('Retrying due: %s', exc)
                time.sleep(self.retry_timeout)
            except requests.exceptions.ConnectionError:
                # This happens when the session gets expired
                self.logger.debug('Recreating session due to ConnectionError')
                self._session = requests.Session()
            attempt_number += 1
        raise MaxRetriesExceeded
    return inner
python
{ "resource": "" }
q42525
HasOffersAPI.setup_managers
train
def setup_managers(self):
    """
    Allows to access manager by model name - it is convenient,
    because HasOffers returns model names in responses.
    """
    self._managers = {}
    for manager_class in MODEL_MANAGERS:
        instance = manager_class(self)
        # NOTE(review): Python parses this condition as
        # (not forbid_registration and not isinstance(..)) or (class is ApplicationManager),
        # so an ApplicationManager is registered even when forbid_registration
        # is set - confirm the precedence matches the intent described below.
        if not instance.forbid_registration \
                and not isinstance(instance, ApplicationManager) or instance.__class__ is ApplicationManager:
            # Descendants of ``ApplicationManager`` shouldn't be present in API instance. They are controlled by
            # Application controller. The manager itself, on the other hand, should.
            setattr(self, instance.name, instance)
            if instance.model:
                self._managers[instance.model.__name__] = instance
            if instance.model_aliases:
                # Register the same manager under every declared alias.
                for alias in instance.model_aliases:
                    self._managers[alias] = instance
python
{ "resource": "" }
q42526
HasOffersAPI.handle_response
train
def handle_response(self, content, target=None, single_result=True, raw=False):
    """
    Parses response, checks it.

    :param content: Decoded JSON payload with a top-level 'response' key
    :param target: Model name used when instantiating result objects
    :param single_result: Return one instance instead of a list
    :param raw: Skip object initialization and return plain data
    :return: Parsed data, or initialized model object(s)
    """
    response = content['response']
    # Raises when the API reported errors.
    self.check_errors(response)
    data = response.get('data')
    if is_empty(data):
        return data
    elif is_paginated(data):
        if 'count' in data and not data['count']:
            # Response is paginated, but is empty
            return data['data']
        # Unwrap the pagination envelope.
        data = data['data']
    if raw:
        return data
    return self.init_all_objects(data, target=target, single_result=single_result)
python
{ "resource": "" }
q42527
HasOffersAPI.init_all_objects
train
def init_all_objects(self, data, target=None, single_result=True):
    """
    Initializes model instances from given data.

    :param data: Raw response data
    :param target: Model name of the primary object(s)
    :param single_result: Return one instance rather than a list
    :return: A single model instance, or a list of instances
    """
    if not single_result:
        return list(self.expand_models(target, data))
    return self.init_target_object(target, data)
python
{ "resource": "" }
q42528
HasOffersAPI.init_target_object
train
def init_target_object(self, target, data):
    """
    Initializes target object and assign extra objects to target as attributes

    :param target: Model name of the primary object
    :param data: Response dict; the target's own payload is popped out and
        every remaining key becomes an attribute on the target object
    :return: The initialized target object
    """
    # Pop the target's own payload; when absent, the whole dict is the payload.
    target_object = self.init_single_object(target, data.pop(target, data))
    for key, item in data.items():
        key_alias = MANAGER_ALIASES.get(key, key)
        if item:
            # Item is an OrderedDict with 4 possible structure patterns:
            # - Just an OrderedDict with (key - value)'s
            # - OrderedDict with single (key - OrderedDict)
            # - OrderedDict with multiple (key - OrderedDict)'s
            # - String (like CreativeCode model)
            if isinstance(item, str):
                children = item
            else:
                first_key = list(item.keys())[0]
                if isinstance(item[first_key], OrderedDict):
                    instances = item.values()
                    if len(instances) > 1:
                        # Multiple nested objects -> list of instances.
                        children = [self.init_single_object(key_alias, instance) for instance in instances]
                    else:
                        # Single nested object -> one instance.
                        children = self.init_single_object(key_alias, list(instances)[0])
                else:
                    # Flat (key - value) mapping -> one instance.
                    children = self.init_single_object(key_alias, item)
            setattr(target_object, key.lower(), children)
        else:
            setattr(target_object, key.lower(), None)
    return target_object
python
{ "resource": "" }
q42529
HasOffersAPI.expand_models
train
def expand_models(self, target, data):
    """
    Generates all objects from given data.

    :param target: Model name of the primary object type
    :param data: List of chunks, or a dict whose values are chunks
    """
    # Dicts are iterated over their values; sequences are used as-is.
    chunks = data.values() if isinstance(data, dict) else data
    for chunk in chunks:
        if target in chunk:
            yield self.init_target_object(target, chunk)
        else:
            for key, item in chunk.items():
                yield self.init_single_object(key, item)
python
{ "resource": "" }
q42530
merge_roles
train
def merge_roles(dominant_name, deprecated_name):
    """
    Merges a deprecated role into a dominant role.

    Moves rates, contributions and overrides from the role named
    *deprecated_name* onto the role named *dominant_name*. A merge is only
    attempted when each name resolves to exactly one role; otherwise the
    function returns silently.
    """
    dominant_qs = ContributorRole.objects.filter(name=dominant_name)
    # NOTE(review): the exists() check is redundant given count() != 1.
    if not dominant_qs.exists() or dominant_qs.count() != 1:
        return
    dominant = dominant_qs.first()
    deprecated_qs = ContributorRole.objects.filter(name=deprecated_name)
    if not deprecated_qs.exists() or deprecated_qs.count() != 1:
        return
    deprecated = deprecated_qs.first()

    # Update Rates: the dominant role keeps its own flat/hourly rate when present.
    if not dominant.flat_rates.exists() and deprecated.flat_rates.exists():
        flat_rate = deprecated.flat_rates.first()
        flat_rate.role = dominant
        flat_rate.save()
    if not dominant.hourly_rates.exists() and deprecated.hourly_rates.exists():
        hourly_rate = deprecated.hourly_rates.first()
        hourly_rate.role = dominant
        hourly_rate.save()
    for ft_rate in deprecated.feature_type_rates.all():
        dom_ft_rate = dominant.feature_type_rates.filter(feature_type=ft_rate.feature_type)
        # A zero-rate entry on the dominant role is treated as a placeholder
        # and replaced by the deprecated role's rate.
        if dom_ft_rate.exists() and dom_ft_rate.first().rate == 0:
            dom_ft_rate.first().delete()
        if not dom_ft_rate.exists():
            ft_rate.role = dominant
            ft_rate.save()

    # Update contributions
    for contribution in deprecated.contribution_set.all():
        contribution.role = dominant
        contribution.save()

    # Update overrides
    for override in deprecated.overrides.all():
        dom_override_qs = dominant.overrides.filter(contributor=override.contributor)
        if not dom_override_qs.exists():
            # No conflicting override: simply re-point it at the dominant role.
            override.role = dominant
            override.save()
        else:
            # Dominant already has an override for this contributor; re-home
            # the deprecated override's rate rows onto it instead.
            dom_override = dom_override_qs.first()
            for flat_override in override.override_flatrate.all():
                flat_override.profile = dom_override
                flat_override.save()
            for hourly_override in override.override_hourly.all():
                hourly_override.profile = dom_override
                hourly_override.save()
            for feature_type_override in override.override_feature_type.all():
                feature_type_override.profile = dom_override
                feature_type_override.save()
python
{ "resource": "" }
q42531
Bugzilla.quick_search
train
def quick_search(self, terms):
    '''Wrapper for search_bugs, for simple string searches.

    :param terms: Quicksearch query string
    :return: Result of search_bugs for the given terms
    '''
    # isinstance is the idiomatic type check (and also accepts str subclasses,
    # which the original `type(terms) is str` rejected).
    assert isinstance(terms, str)
    p = [{'quicksearch': terms}]
    return self.search_bugs(p)
python
{ "resource": "" }
q42532
Bugzilla._get
train
def _get(self, q, params=''):
    '''Generic GET wrapper including the api_key.

    :param q: Query path; a single trailing slash is stripped
    :param params: Extra, already-encoded query-string parameters
    :return: Response payload wrapped in a DotDict
    :raises Exception: on an HTTP failure or an API-reported error
    '''
    if q[-1] == '/':
        q = q[:-1]
    headers = {'Content-Type': 'application/json'}
    r = requests.get('{url}{q}?api_key={key}{params}'.format(url=self.url, q=q, key=self.api_key, params=params),
                     headers=headers)
    # PERF FIX: decode the response body once; the original called r.json()
    # three times for the same response.
    payload = r.json()
    ret = DotDict(payload)
    if not r.ok or ('error' in ret and ret.error == True):  # deliberate == True: only boolean True marks an API error
        raise Exception(r.url, r.reason, r.status_code, payload)
    return ret
python
{ "resource": "" }
q42533
Bugzilla._post
train
def _post(self, q, payload='', params=''):
    '''Generic POST wrapper including the api_key.

    :param q: Query path; a single trailing slash is stripped
    :param payload: Request body (JSON string)
    :param params: Extra, already-encoded query-string parameters
    :return: Response payload wrapped in a DotDict
    :raises Exception: on an HTTP failure or an API-reported error
    '''
    if q[-1] == '/':
        q = q[:-1]
    headers = {'Content-Type': 'application/json'}
    r = requests.post('{url}{q}?api_key={key}{params}'.format(url=self.url, q=q, key=self.api_key, params=params),
                      headers=headers, data=payload)
    # PERF FIX: decode the response body once; the original called r.json()
    # three times for the same response.
    body = r.json()
    ret = DotDict(body)
    if not r.ok or ('error' in ret and ret.error == True):  # deliberate == True: only boolean True marks an API error
        raise Exception(r.url, r.reason, r.status_code, body)
    return ret
python
{ "resource": "" }
q42534
game_system.bind_objects
train
def bind_objects(self, *objects):
    """Bind one or more objects.

    Registers key handlers for the objects with the control handler and
    appends them to the system's object collection.
    """
    self.control.bind_keys(objects)
    # In-place concatenation keeps self.objects' existing container type
    # (a list accepts a tuple via +=, unlike list + tuple).
    self.objects += objects
python
{ "resource": "" }
q42535
game_system.draw
train
def draw(self):
    """Draw all the sprites in the system using their renderers.

    This method is convenient to call from your Pyglet window's on_draw
    handler to redraw particles when needed.
    """
    # Save all OpenGL state so sprite renderers cannot leak state changes.
    glPushAttrib(GL_ALL_ATTRIB_BITS)
    self.draw_score()
    # The system itself is iterable over its sprites.
    for sprite in self:
        sprite.draw()
    glPopAttrib()
python
{ "resource": "" }
q42536
ball.reset_ball
train
def reset_ball(self, x, y):
    """Reset the ball to a set location on the screen.

    :param x: New horizontal position
    :param y: New vertical position
    """
    position = self.sprite.position
    position.x = x
    position.y = y
python
{ "resource": "" }
q42537
ball.update
train
def update(self, td):
    """Update state of ball.

    :param td: Time delta since the last update
    """
    # Remember previous kinematics before they are overwritten elsewhere.
    self.sprite.last_position = self.sprite.position
    self.sprite.last_velocity = self.sprite.velocity
    # Idiom fix: identity comparison with None instead of `!= None`.
    if self.particle_group is not None:
        self.update_particle_group(td)
python
{ "resource": "" }
q42538
Box.generate
train
def generate(self):
    """Return a random point uniformly distributed inside the box."""
    base_x, base_y, base_z = self.point1
    return (base_x + random() * self.size_x,
            base_y + random() * self.size_y,
            base_z + random() * self.size_z)
python
{ "resource": "" }
q42539
custom_search_model
train
def custom_search_model(model, query, preview=False, published=False, id_field="id", sort_pinned=True, field_map={}):
    """Filter a model with the given filter.

    `field_map` translates incoming field names to the appropriate ES names.

    :param model: Model whose `search_objects` manager is queried
    :param query: Custom-search query dict (groups, ids, pinned_ids, query text)
    :param preview: Use the preview filter (keeps excluded ids visible)
    :param published: Restrict results to already-published content
    :param id_field: Name of the id field used by the filter builders
    :param sort_pinned: Boost pinned ids to the top of the results
    :return: Elasticsearch queryset
    """
    # Preview keeps excluded ids visible in the editor; the normal filter drops them.
    if preview:
        func = preview_filter_from_query
    else:
        func = filter_from_query
    f = func(query, id_field=id_field, field_map=field_map)
    # filter by published
    if published:
        if f:
            f &= Range(published={"lte": timezone.now()})
        else:
            f = Range(published={"lte": timezone.now()})
    qs = model.search_objects.search(published=False)
    if f:
        qs = qs.filter(f)
    # possibly include a text query
    if query.get("query"):
        qs = qs.query("match", _all=query["query"])
    # set up pinned ids
    pinned_ids = query.get("pinned_ids")
    if pinned_ids and sort_pinned:
        # Double the score of pinned documents so they sort to the top.
        pinned_query = es_query.FunctionScore(
            boost_mode="multiply",
            functions=[{
                "filter": Terms(id=pinned_ids),
                "weight": 2
            }]
        )
        qs = qs.query(pinned_query)
        qs = qs.sort("_score", "-published")
    else:
        qs = qs.sort("-published")
    return qs
python
{ "resource": "" }
q42540
preview_filter_from_query
train
def preview_filter_from_query(query, id_field="id", field_map={}):
    """This filter includes the "excluded_ids" so they still show up in the editor."""
    f = groups_filter_from_query(query, field_map=field_map)
    # NOTE: excluded ids are intentionally NOT filtered out here, so that
    # excluded content remains visible in the editor.
    included_ids = query.get("included_ids")
    if included_ids:
        # Explicitly included content is OR-ed into the filter.
        include_filter = Terms(pk=included_ids)
        f = (f | include_filter) if f else include_filter
    return f
python
{ "resource": "" }
q42541
filter_from_query
train
def filter_from_query(query, id_field="id", field_map={}):
    """Return a filter that really drops "excluded_ids", unlike the preview
    filter which keeps them for UI purposes.
    """
    f = groups_filter_from_query(query, field_map=field_map)
    excluded_ids = query.get("excluded_ids")
    included_ids = query.get("included_ids")

    if included_ids:
        # include these, please
        include_filter = Terms(pk=included_ids)
        f = include_filter if f is None else f | include_filter
    if excluded_ids:
        # exclude these, applied last so exclusions win over group matches
        if f is None:
            f = MatchAll()
        f = f & ~Terms(pk=excluded_ids)
    return f
python
{ "resource": "" }
q42542
get_condition_filter
train
def get_condition_filter(condition, field_map={}):
    """
    Return the appropriate filter for a given group condition.

    # TODO: integrate this into groups_filter_from_query function.

    :param condition: Dict with 'field', 'type' ('all'|'any'|'none') and 'values'
    :param field_map: Translates incoming field names to ES field names
    :return: An ES filter; MatchAll() when the condition has no values
    """
    field_name = condition.get("field")
    field_name = field_map.get(field_name, field_name)
    operation = condition["type"]
    values = condition["values"]
    condition_filter = MatchAll()
    # NOTE(review): an unknown operation only raises when values is non-empty.
    if values:
        values = [v["value"] for v in values]
        if operation == "all":
            # AND together one Term filter per value.
            for value in values:
                if "." in field_name:
                    # Dotted names address nested documents; wrap in Nested.
                    path = field_name.split(".")[0]
                    condition_filter &= Nested(path=path, filter=Term(**{field_name: value}))
                else:
                    condition_filter &= Term(**{field_name: value})
        elif operation == "any":
            if "." in field_name:
                path = field_name.split(".")[0]
                condition_filter &= Nested(path=path, filter=Terms(**{field_name: values}))
            else:
                condition_filter &= Terms(**{field_name: values})
        elif operation == "none":
            if "." in field_name:
                path = field_name.split(".")[0]
                condition_filter &= ~Nested(path=path, filter=Terms(**{field_name: values}))
            else:
                condition_filter &= ~Terms(**{field_name: values})
        else:
            raise ValueError(
                """ES conditions must be one of the following values: ['all', 'any', 'none']"""
            )
    return condition_filter
python
{ "resource": "" }
q42543
groups_filter_from_query
train
def groups_filter_from_query(query, field_map={}):
    """Creates an F object for the groups of a search query.

    Each group's conditions are AND-ed together; the groups themselves are
    OR-ed. Returns None when the query has no groups.

    NOTE(review): this duplicates get_condition_filter's per-condition
    logic - see the TODO on that function.
    """
    f = None
    # filter groups
    for group in query.get("groups", []):
        group_f = MatchAll()
        for condition in group.get("conditions", []):
            field_name = condition["field"]
            field_name = field_map.get(field_name, field_name)
            operation = condition["type"]
            values = condition["values"]
            if values:
                values = [v["value"] for v in values]
                if operation == "all":
                    # NOTE: is there a better way to express this?
                    for value in values:
                        if "." in field_name:
                            # Dotted names address nested documents.
                            path = field_name.split(".")[0]
                            group_f &= Nested(path=path, filter=Term(**{field_name: value}))
                        else:
                            group_f &= Term(**{field_name: value})
                elif operation == "any":
                    if "." in field_name:
                        path = field_name.split(".")[0]
                        group_f &= Nested(path=path, filter=Terms(**{field_name: values}))
                    else:
                        group_f &= Terms(**{field_name: values})
                elif operation == "none":
                    if "." in field_name:
                        path = field_name.split(".")[0]
                        group_f &= ~Nested(path=path, filter=Terms(**{field_name: values}))
                    else:
                        group_f &= ~Terms(**{field_name: values})
        # An optional named date range further restricts the group.
        date_range = group.get("time")
        if date_range:
            group_f &= date_range_filter(date_range)
        if f:
            f |= group_f
        else:
            f = group_f
    return f
python
{ "resource": "" }
q42544
date_range_filter
train
def date_range_filter(range_name):
    """Create a filter from a named date range.

    Looks up *range_name* in settings.CUSTOM_SEARCH_TIME_PERIODS and
    returns a Range filter on `published`; matches everything when the
    name is unknown.
    """
    matching_periods = [period for period in settings.CUSTOM_SEARCH_TIME_PERIODS
                        if period["label"] == range_name]
    num_days = matching_periods[0]["days"] if len(matching_periods) else None
    if num_days:
        start_time = timezone.now() - timedelta(num_days)
        return Range(published={"gte": start_time})
    return MatchAll()
python
{ "resource": "" }
q42545
Rest.request
train
def request(self, action, data={}, headers={}, method='GET'):
    """
    Append the REST headers to every request.

    :param action: Endpoint/action passed through to the transport
    :param data: Request payload
    :param headers: Extra headers supplied by the caller; the mandatory
        REST headers below take precedence on conflicts
    :param method: HTTP verb, defaults to GET
    :return: Result of Transport.request
    """
    # BUG FIX: the original discarded caller-supplied headers entirely by
    # rebinding the parameter; merge them instead, letting the mandatory
    # REST headers win on conflicts.
    merged_headers = dict(headers)
    merged_headers.update({
        "Authorization": "Bearer " + self.token,
        "Content-Type": "application/json",
        "X-Version": "1",
        "Accept": "application/json",
    })
    return Transport.request(self, action, data, merged_headers, method)
python
{ "resource": "" }
q42546
AdminsAPI.make_account_admin
train
def make_account_admin(self, user_id, account_id, role=None, role_id=None, send_confirmation=None):
    """
    Make an account admin.

    Flag an existing user as an admin within the account.
    """
    # REQUIRED path and form parameters.
    path = {"account_id": account_id}
    data = {"user_id": user_id}
    params = {}

    # OPTIONAL - role: (deprecated) role for the admin relationship;
    # defaults to 'AccountAdmin'.
    if role is not None:
        data["role"] = role
    # OPTIONAL - role_id: role for the admin relationship; defaults to the
    # built-in role for 'AccountAdmin'.
    if role_id is not None:
        data["role_id"] = role_id
    # OPTIONAL - send_confirmation: notify the new admin by email; default true.
    if send_confirmation is not None:
        data["send_confirmation"] = send_confirmation

    self.logger.debug("POST /api/v1/accounts/{account_id}/admins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/admins".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42547
AdminsAPI.list_account_admins
train
def list_account_admins(self, account_id, user_id=None):
    """
    List account admins.

    List the admins in the account.
    """
    # REQUIRED path parameter.
    path = {"account_id": account_id}
    data = {}
    params = {}

    # OPTIONAL - user_id: scope results to these user IDs.
    if user_id is not None:
        params["user_id"] = user_id

    self.logger.debug("GET /api/v1/accounts/{account_id}/admins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/admins".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42548
TintRegistry.match_name
train
def match_name(self, in_string, fuzzy=False):
    """Match a color to a sRGB value.

    The matching will be based purely on the input string and the color names
    in the registry. If there's no direct hit, a fuzzy matching algorithm is
    applied.

    This method will never fail to return a sRGB value, but depending on
    the score, it might or might not be a sensible result – as a rule of
    thumb, any score less then 90 indicates that there's a lot of guessing
    going on. It's the callers responsibility to judge if the return value
    should be trusted.

    In normalization terms, this method implements "normalize an arbitrary
    color name to a sRGB value".

    Args:
        in_string (string): The input string containing something resembling
            a color name.
        fuzzy (bool, optional): Try fuzzy matching if no exact match was
            found. Defaults to ``False``.

    Returns:
        A named tuple with the members `hex_code` and `score`.

    Raises:
        ValueError: If ``fuzzy`` is ``False`` and no match is found

    Examples:
        >>> tint_registry = TintRegistry()
        >>> tint_registry.match_name("rather white", fuzzy=True)
        MatchResult(hex_code=u'ffffff', score=95)
    """
    in_string = _normalize(in_string)
    # Exact (normalized) name lookup short-circuits with a perfect score.
    if in_string in self._hex_by_color:
        return MatchResult(self._hex_by_color[in_string], 100)

    if not fuzzy:
        raise ValueError("No match for %r found." % in_string)

    # We want the standard scorer *plus* the set scorer, because colors are often
    # (but not always) related by sub-strings
    color_names = self._hex_by_color.keys()
    set_match = dict(fuzzywuzzy.process.extract(
        in_string, color_names, scorer=fuzzywuzzy.fuzz.token_set_ratio
    ))
    standard_match = dict(fuzzywuzzy.process.extract(in_string, color_names))

    # This would be much easier with a collections.Counter, but alas! it's a 2.7 feature.
    # Sum the two scores per candidate; the best combined candidate wins.
    key_union = set(set_match) | set(standard_match)
    counter = ((n, set_match.get(n, 0) + standard_match.get(n, 0)) for n in key_union)
    color_name, score = sorted(counter, key=operator.itemgetter(1))[-1]
    # score/2 averages the two scorers back onto the 0-100 scale.
    # NOTE(review): under Python 2 this is integer (floor) division — confirm
    # whether a truncated score is intended.
    return MatchResult(self._hex_by_color[color_name], score / 2)
python
{ "resource": "" }
q42549
TintRegistry.find_nearest
train
def find_nearest(self, hex_code, system, filter_set=None):
    """Find a color name that's most similar to a given sRGB hex code.

    In normalization terms, this method implements "normalize an arbitrary
    sRGB value to a well-defined color name".

    Args:
        hex_code (string): The sRGB hex code to look up (e.g. ``"54e6e4"``).
        system (string): The color system. Currently, `"en"`` is the only
            default system.
        filter_set (iterable of string, optional): Limits the output choices
            to fewer color names. The names (e.g. ``["black", "white"]``)
            must be present in the given system. If omitted, all color names
            of the system are considered. Defaults to None.

    Returns:
        A named tuple with the members `color_name` and `distance`.

    Raises:
        ValueError: If argument `system` is not a registered color system.

    Examples:
        >>> tint_registry = TintRegistry()
        >>> tint_registry.find_nearest("54e6e4", system="en")
        FindResult(color_name=u'bright turquoise', distance=3.730288645055483)
        >>> tint_registry.find_nearest("54e6e4", "en", filter_set=("white", "black"))
        FindResult(color_name=u'white', distance=25.709952192116894)
    """
    if system not in self._colors_by_system_hex:
        raise ValueError(
            "%r is not a registered color system. "
            "Try one of %r" % (system, self._colors_by_system_hex.keys())
        )

    hex_code = hex_code.lower().strip()

    # Try direct hit (fast path)
    if hex_code in self._colors_by_system_hex[system]:
        color_name = self._colors_by_system_hex[system][hex_code]
        if filter_set is None or color_name in filter_set:
            return FindResult(color_name, 0)

    # No direct hit, assemble list of lab_color/color_name pairs
    colors = self._colors_by_system_lab[system]
    if filter_set is not None:
        # NOTE(review): set(filter_set) is rebuilt for every pair yielded by
        # this generator expression — hoisting it once before the loop would
        # avoid repeated set construction.
        colors = (pair for pair in colors if pair[1] in set(filter_set))

    # find minimal distance (CIEDE2000 in Lab space)
    lab_color = _hex_to_lab(hex_code)
    min_distance = sys.float_info.max
    min_color_name = None
    for current_lab_color, current_color_name in colors:
        distance = colormath.color_diff.delta_e_cie2000(lab_color, current_lab_color)
        if distance < min_distance:
            min_distance = distance
            min_color_name = current_color_name

    return FindResult(min_color_name, min_distance)
python
{ "resource": "" }
q42550
SessionAuthSourceInitializer
train
def SessionAuthSourceInitializer(
    value_key='sanity.'
):
    """ An authentication source that uses the current session """
    # The stored session key is the prefix plus the literal suffix 'value'.
    value_key = value_key + 'value'

    @implementer(IAuthSourceService)
    class SessionAuthSource(object):
        # No headers influence this source, so nothing to add to Vary.
        vary = []

        def __init__(self, context, request):
            self.request = request
            self.session = request.session
            # Lazily-populated cache of the session value; None = not loaded.
            self.cur_val = None

        def get_value(self):
            # [None, None] is the sentinel for "no auth stored".
            if self.cur_val is None:
                self.cur_val = self.session.get(value_key, [None, None])

            return self.cur_val

        def headers_remember(self, value):
            # NOTE(review): cur_val is primed from the session but not
            # updated to the new value — subsequent get_value() calls on
            # this instance return the stale cached value. Confirm intended.
            if self.cur_val is None:
                self.cur_val = self.session.get(value_key, [None, None])

            self.session[value_key] = value
            # Session persistence needs no response headers.
            return []

        def headers_forget(self):
            if self.cur_val is None:
                self.cur_val = self.session.get(value_key, [None, None])

            if value_key in self.session:
                del self.session[value_key]

            return []

    return SessionAuthSource
python
{ "resource": "" }
q42551
CookieAuthSourceInitializer
train
def CookieAuthSourceInitializer(
    secret,
    cookie_name='auth',
    secure=False,
    max_age=None,
    httponly=False,
    path="/",
    domains=None,
    debug=False,
    hashalg='sha512',
):
    """ An authentication source that uses a unique cookie.

    Returns a class implementing IAuthSourceService whose value is stored in
    a signed cookie (salt 'authsanity', signed with `secret`/`hashalg`).
    """
    @implementer(IAuthSourceService)
    class CookieAuthSource(object):
        # Responses depend on the incoming Cookie header.
        vary = ['Cookie']

        def __init__(self, context, request):
            self.domains = domains

            # Default to the requesting host when no domains were supplied.
            if self.domains is None:
                self.domains = []
                self.domains.append(request.domain)

            # NOTE(review): the profile is built with the *original* `domains`
            # argument (possibly None), not the per-request self.domains used
            # by headers_remember — confirm this asymmetry is intentional.
            self.cookie = SignedCookieProfile(
                secret,
                'authsanity',
                cookie_name,
                secure=secure,
                max_age=max_age,
                httponly=httponly,
                path=path,
                domains=domains,
                hashalg=hashalg,
            )
            # Bind the cookie to the current request
            self.cookie = self.cookie.bind(request)

        def get_value(self):
            val = self.cookie.get_value()

            # [None, None] is the sentinel for "no auth stored".
            if val is None:
                return [None, None]

            return val

        def headers_remember(self, value):
            return self.cookie.get_headers(value, domains=self.domains)

        def headers_forget(self):
            # max_age=0 expires the cookie immediately.
            return self.cookie.get_headers(None, max_age=0)

    return CookieAuthSource
python
{ "resource": "" }
q42552
HeaderAuthSourceInitializer
train
def HeaderAuthSourceInitializer(
    secret,
    salt='sanity.header.'
):
    """ An authentication source that uses the Authorization header.

    Returns a class implementing IAuthSourceService whose value travels as
    a signed bearer token in the Authorization header.
    """
    @implementer(IAuthSourceService)
    class HeaderAuthSource(object):
        # Responses depend on the incoming Authorization header.
        vary = ['Authorization']

        def __init__(self, context, request):
            self.request = request
            self.cur_val = None

            # JSON payload signed with `secret` and `salt`.
            serializer = JSONSerializer()

            self.serializer = SignedSerializer(
                secret,
                salt,
                serializer=serializer,
            )

        def _get_authorization(self):
            # Returns the deserialized token value, or None on any failure
            # (missing header, bad signature, malformed payload).
            try:
                type, token = self.request.authorization
                return self.serializer.loads(token)
            except Exception:
                return None

        def _create_authorization(self, value):
            try:
                return self.serializer.dumps(value)
            except Exception:
                return ''

        def get_value(self):
            if self.cur_val is None:
                self.cur_val = self._get_authorization() or [None, None]

            return self.cur_val

        def headers_remember(self, value):
            # NOTE(review): this assignment is a no-op (None when already
            # None) — looks like a vestige of the caching guard used by the
            # sibling auth sources; confirm.
            if self.cur_val is None:
                self.cur_val = None

            token = self._create_authorization(value)
            # NOTE(review): b'Bearer ' + token assumes the serializer returns
            # bytes — verify against SignedSerializer.dumps on this Python
            # version.
            auth_info = native_(b'Bearer ' + token, 'latin-1', 'strict')
            return [('Authorization', auth_info)]

        def headers_forget(self):
            if self.cur_val is None:
                self.cur_val = None

            # Nothing to clear server-side; the client simply stops sending
            # the header.
            return []

    return HeaderAuthSource
python
{ "resource": "" }
q42553
PWRESTHandler.sort
train
def sort(self, *sorting, **kwargs):
    """Sort resources.

    Each item in `sorting` is a ``(field_name, descending)`` pair; names not
    present on the model are silently skipped. Returns the ordered
    collection, or the collection unchanged when no valid field was given.
    """
    model_fields = self.meta.model._meta.fields
    ordering = []
    for name, descending in sorting:
        field = model_fields.get(name)
        if field is None:
            # Unknown column names are ignored rather than raising.
            continue
        ordering.append(field.desc() if descending else field)

    if not ordering:
        return self.collection
    return self.collection.order_by(*ordering)
python
{ "resource": "" }
q42554
PWRESTHandler.paginate
train
def paginate(self, request, offset=0, limit=None):
    """Paginate queryset.

    Returns a ``(page, total)`` tuple: the collection sliced by
    offset/limit, and the total (unsliced) row count.
    """
    page = self.collection.offset(offset).limit(limit)
    total = self.collection.count()
    return page, total
python
{ "resource": "" }
q42555
AccountsAPI.get_sub_accounts_of_account
train
def get_sub_accounts_of_account(self, account_id, recursive=None):
    """
    Get the sub-accounts of an account.

    List accounts that are sub-accounts of the given account.

    :param account_id: id of the parent account (required path field)
    :param recursive: when true, return the entire account tree below this
        account (still paginated); server default is direct children only
    :returns: paginated list of account records from the Canvas API
    """
    path = {"account_id": account_id}
    data = {}
    params = {}

    # Only send the flag when the caller set it, keeping the server default.
    if recursive is not None:
        params["recursive"] = recursive

    self.logger.debug("GET /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42556
AccountsAPI.list_active_courses_in_account
train
def list_active_courses_in_account(self, account_id, by_subaccounts=None, by_teachers=None, completed=None, enrollment_term_id=None, enrollment_type=None, hide_enrollmentless_courses=None, include=None, published=None, search_term=None, state=None, with_enrollments=None):
    """
    List active courses in an account.

    Retrieve the list of courses in this account.
    """
    # path -> URL path fields, params -> query string, data -> form body.
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # OPTIONAL - with_enrollments
    """If true, include only courses with at least one enrollment. If false,
    include only courses with no enrollments. If not present, do not filter
    on course enrollment status."""
    if with_enrollments is not None:
        params["with_enrollments"] = with_enrollments

    # OPTIONAL - enrollment_type
    """If set, only return courses that have at least one user enrolled in
    in the course with one of the specified enrollment types."""
    if enrollment_type is not None:
        self._validate_enum(enrollment_type, ["teacher", "student", "ta", "observer", "designer"])
        params["enrollment_type"] = enrollment_type

    # OPTIONAL - published
    """If true, include only published courses. If false, exclude published
    courses. If not present, do not filter on published status."""
    if published is not None:
        params["published"] = published

    # OPTIONAL - completed
    """If true, include only completed courses (these may be in state
    'completed', or their enrollment term may have ended). If false, exclude
    completed courses. If not present, do not filter on completed status."""
    if completed is not None:
        params["completed"] = completed

    # OPTIONAL - by_teachers
    """List of User IDs of teachers; if supplied, include only courses
    taught by one of the referenced users."""
    if by_teachers is not None:
        params["by_teachers"] = by_teachers

    # OPTIONAL - by_subaccounts
    """List of Account IDs; if supplied, include only courses associated
    with one of the referenced subaccounts."""
    if by_subaccounts is not None:
        params["by_subaccounts"] = by_subaccounts

    # OPTIONAL - hide_enrollmentless_courses
    """If present, only return courses that have at least one enrollment.
    Equivalent to 'with_enrollments=true'; retained for compatibility."""
    if hide_enrollmentless_courses is not None:
        params["hide_enrollmentless_courses"] = hide_enrollmentless_courses

    # OPTIONAL - state
    """If set, only return courses that are in the given state(s). By
    default, all states but "deleted" are returned."""
    if state is not None:
        self._validate_enum(state, ["created", "claimed", "available", "completed", "deleted", "all"])
        params["state"] = state

    # OPTIONAL - enrollment_term_id
    """If set, only includes courses from the specified term."""
    if enrollment_term_id is not None:
        params["enrollment_term_id"] = enrollment_term_id

    # OPTIONAL - search_term
    """The partial course name, code, or full ID to match and return in the
    results list. Must be at least 3 characters."""
    if search_term is not None:
        params["search_term"] = search_term

    # OPTIONAL - include
    """- All explanations can be seen in the {api:CoursesController#index
    Course API index documentation} - "sections", "needs_grading_count" and
    "total_scores" are not valid options at the account level"""
    if include is not None:
        self._validate_enum(include, ["syllabus_body", "term", "course_progress", "storage_quota_used_mb", "total_students", "teachers"])
        params["include"] = include

    self.logger.debug("GET /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42557
AccountsAPI.update_account
train
def update_account(self, id, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_time_zone=None, account_default_user_storage_quota_mb=None, account_name=None, account_services=None, account_settings_lock_all_announcements_locked=None, account_settings_lock_all_announcements_value=None, account_settings_restrict_student_future_listing_locked=None, account_settings_restrict_student_future_listing_value=None, account_settings_restrict_student_future_view_locked=None, account_settings_restrict_student_future_view_value=None, account_settings_restrict_student_past_view_locked=None, account_settings_restrict_student_past_view_value=None):
    """
    Update an account.

    Update an existing account. Only the fields the caller passes are sent,
    so unspecified settings are left unchanged server-side.
    """
    # path -> URL path fields, params -> query string, data -> form body.
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - id
    """ID"""
    path["id"] = id

    # OPTIONAL - account[name]
    """Updates the account name"""
    if account_name is not None:
        data["account[name]"] = account_name

    # OPTIONAL - account[default_time_zone]
    """The default time zone of the account. Allowed time zones are
    {http://www.iana.org/time-zones IANA time zones} or friendlier
    {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on
    Rails time zones}."""
    if account_default_time_zone is not None:
        data["account[default_time_zone]"] = account_default_time_zone

    # OPTIONAL - account[default_storage_quota_mb]
    """The default course storage quota to be used, if not otherwise
    specified."""
    if account_default_storage_quota_mb is not None:
        data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb

    # OPTIONAL - account[default_user_storage_quota_mb]
    """The default user storage quota to be used, if not otherwise
    specified."""
    if account_default_user_storage_quota_mb is not None:
        data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb

    # OPTIONAL - account[default_group_storage_quota_mb]
    """The default group storage quota to be used, if not otherwise
    specified."""
    if account_default_group_storage_quota_mb is not None:
        data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb

    # OPTIONAL - account[settings][restrict_student_past_view][value]
    """Restrict students from viewing courses after end date"""
    if account_settings_restrict_student_past_view_value is not None:
        data["account[settings][restrict_student_past_view][value]"] = account_settings_restrict_student_past_view_value

    # OPTIONAL - account[settings][restrict_student_past_view][locked]
    """Lock this setting for sub-accounts and courses"""
    if account_settings_restrict_student_past_view_locked is not None:
        data["account[settings][restrict_student_past_view][locked]"] = account_settings_restrict_student_past_view_locked

    # OPTIONAL - account[settings][restrict_student_future_view][value]
    """Restrict students from viewing courses before start date"""
    if account_settings_restrict_student_future_view_value is not None:
        data["account[settings][restrict_student_future_view][value]"] = account_settings_restrict_student_future_view_value

    # OPTIONAL - account[settings][restrict_student_future_view][locked]
    """Lock this setting for sub-accounts and courses"""
    if account_settings_restrict_student_future_view_locked is not None:
        data["account[settings][restrict_student_future_view][locked]"] = account_settings_restrict_student_future_view_locked

    # OPTIONAL - account[settings][lock_all_announcements][value]
    """Disable comments on announcements"""
    if account_settings_lock_all_announcements_value is not None:
        data["account[settings][lock_all_announcements][value]"] = account_settings_lock_all_announcements_value

    # OPTIONAL - account[settings][lock_all_announcements][locked]
    """Lock this setting for sub-accounts and courses"""
    if account_settings_lock_all_announcements_locked is not None:
        data["account[settings][lock_all_announcements][locked]"] = account_settings_lock_all_announcements_locked

    # OPTIONAL - account[settings][restrict_student_future_listing][value]
    """Restrict students from viewing future enrollments in course list"""
    if account_settings_restrict_student_future_listing_value is not None:
        data["account[settings][restrict_student_future_listing][value]"] = account_settings_restrict_student_future_listing_value

    # OPTIONAL - account[settings][restrict_student_future_listing][locked]
    """Lock this setting for sub-accounts and courses"""
    if account_settings_restrict_student_future_listing_locked is not None:
        data["account[settings][restrict_student_future_listing][locked]"] = account_settings_restrict_student_future_listing_locked

    # OPTIONAL - account[services]
    """Give this a set of keys and boolean values to enable or disable
    services matching the keys"""
    if account_services is not None:
        data["account[services]"] = account_services

    self.logger.debug("PUT /api/v1/accounts/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/accounts/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42558
AccountsAPI.create_new_sub_account
train
def create_new_sub_account(self, account_id, account_name, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_user_storage_quota_mb=None, account_sis_account_id=None):
    """
    Create a new sub-account.

    Add a new sub-account to a given account.
    """
    # path -> URL path fields, params -> query string, data -> form body.
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # REQUIRED - account[name]
    """The name of the new sub-account."""
    data["account[name]"] = account_name

    # OPTIONAL - account[sis_account_id]
    """The account's identifier in the Student Information System."""
    if account_sis_account_id is not None:
        data["account[sis_account_id]"] = account_sis_account_id

    # OPTIONAL - account[default_storage_quota_mb]
    """The default course storage quota to be used, if not otherwise
    specified."""
    if account_default_storage_quota_mb is not None:
        data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb

    # OPTIONAL - account[default_user_storage_quota_mb]
    """The default user storage quota to be used, if not otherwise
    specified."""
    if account_default_user_storage_quota_mb is not None:
        data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb

    # OPTIONAL - account[default_group_storage_quota_mb]
    """The default group storage quota to be used, if not otherwise
    specified."""
    if account_default_group_storage_quota_mb is not None:
        data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb

    self.logger.debug("POST /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42559
Delete.delete
train
def delete(self, table, where=None):
    """Delete existing rows from a table.

    :param table: table name (wrapped/escaped by the `wrap` helper)
    :param where: optional ``(column, value)`` pair; when omitted, ALL rows
        in the table are deleted
    :returns: True (unconditionally; errors surface from self.execute)
    """
    if where:
        where_key, where_val = where
        # NOTE(review): the WHERE value is interpolated directly into the
        # SQL string — a value containing a quote breaks the query and this
        # is an SQL-injection vector for untrusted input. Prefer a
        # parameterized query if self.execute supports placeholders.
        query = "DELETE FROM {0} WHERE {1}='{2}'".format(wrap(table), where_key, where_val)
    else:
        query = 'DELETE FROM {0}'.format(wrap(table))
    self.execute(query)
    return True
python
{ "resource": "" }
q42560
networks_distribution
train
def networks_distribution(df, filepath=None):
    """
    Generates two alternative plots describing the distribution of variables `mse` and `size`.
    It is intended to be used over a list of logical networks.

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `mse` and `size`

    filepath: str
        Absolute path to a folder where to write the plots

    Returns
    -------
    tuple
        Generated plots


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    # NOTE: mutates the caller's DataFrame — `mse` is reformatted in place
    # to a 4-decimal string so it can be used as a categorical axis.
    df.mse = df.mse.map(lambda f: "%.4f" % f)

    g = sns.JointGrid(x="mse", y="size", data=df)

    g.plot_joint(sns.violinplot, scale='count')
    # Force one integer tick per network size.
    g.ax_joint.set_yticks(range(df['size'].min(), df['size'].max() + 1))
    g.ax_joint.set_yticklabels(range(df['size'].min(), df['size'].max() + 1))

    for tick in g.ax_joint.get_xticklabels():
        tick.set_rotation(90)

    g.ax_joint.set_xlabel("MSE")
    g.ax_joint.set_ylabel("Size")

    # Annotate the marginal axes with the count of networks per MSE / size.
    for i, t in enumerate(g.ax_joint.get_xticklabels()):
        c = df[df['mse'] == t.get_text()].shape[0]
        g.ax_marg_x.annotate(c, xy=(i, 0.5), va="center", ha="center", size=20, rotation=90)

    for i, t in enumerate(g.ax_joint.get_yticklabels()):
        s = int(t.get_text())
        c = df[df['size'] == s].shape[0]
        g.ax_marg_y.annotate(c, xy=(0.5, s), va="center", ha="center", size=20)

    if filepath:
        g.savefig(os.path.join(filepath, 'networks-distribution.pdf'))

    # Second plot: heatmap of network counts per (size, mse) cell.
    plt.figure()
    counts = df[["size", "mse"]].reset_index(level=0).groupby(["size", "mse"], as_index=False).count()
    cp = counts.pivot("size", "mse", "index").sort_index()

    ax = sns.heatmap(cp, annot=True, fmt=".0f", linewidths=.5)
    ax.set_xlabel("MSE")
    ax.set_ylabel("Size")

    if filepath:
        plt.savefig(os.path.join(filepath, 'networks-heatmap.pdf'))

    return g, ax
python
{ "resource": "" }
q42561
mappings_frequency
train
def mappings_frequency(df, filepath=None):
    """
    Plots the frequency of logical conjunction mappings

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `frequency` and `mapping`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    plot
        Generated plot


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    df = df.sort_values('frequency')
    # Bucket frequencies into three confidence bands used for hue:
    # 0 = rare (<0.2), 1 = middle, 2 = frequent (>=0.8).
    df['conf'] = df.frequency.map(lambda f: 0 if f < 0.2 else 1 if f < 0.8 else 2)

    g = sns.factorplot(x="mapping", y="frequency", data=df, aspect=3, hue='conf', legend=False)
    for tick in g.ax.get_xticklabels():
        tick.set_rotation(90)

    g.ax.set_ylim([-.05, 1.05])
    g.ax.set_xlabel("Logical mapping")
    g.ax.set_ylabel("Frequency")

    if filepath:
        g.savefig(os.path.join(filepath, 'mappings-frequency.pdf'))

    return g
python
{ "resource": "" }
q42562
behaviors_distribution
train
def behaviors_distribution(df, filepath=None):
    """
    Plots the distribution of logical networks across input-output behaviors.
    Optionally, input-output behaviors can be grouped by MSE.

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `networks` and optionally `mse`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    plot
        Generated plot


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    cols = ["networks", "index"]
    rcols = ["Logical networks", "Input-Output behaviors"]
    sort_cols = ["networks"]

    # When an `mse` column is present, group/sort by it first and render it
    # as a 4-decimal string so it can serve as a hue category.
    if "mse" in df.columns:
        cols.append("mse")
        rcols.append("MSE")
        sort_cols = ["mse"] + sort_cols
        df.mse = df.mse.map(lambda f: "%.4f" % f)

    df = df.sort_values(sort_cols).reset_index(drop=True).reset_index(level=0)[cols]
    df.columns = rcols

    if "MSE" in df.columns:
        g = sns.factorplot(x='Input-Output behaviors', y='Logical networks', hue='MSE', data=df, aspect=3, kind='bar', legend_out=False)
    else:
        g = sns.factorplot(x='Input-Output behaviors', y='Logical networks', data=df, aspect=3, kind='bar', legend_out=False)

    # One bar per behavior: individual behavior labels are not meaningful.
    g.ax.set_xticks([])

    if filepath:
        g.savefig(os.path.join(filepath, 'behaviors-distribution.pdf'))

    return g
python
{ "resource": "" }
q42563
experimental_designs
train
def experimental_designs(df, filepath=None):
    """
    For each experimental design it plot all the corresponding experimental conditions in a different plot

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `id` and starting with `TR:`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    list
        Generated plots


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    axes = []
    # Binary colormap: 0 -> white (cue off), 1 -> black (cue on).
    bw = matplotlib.colors.ListedColormap(['white', 'black'])
    cols = df.columns
    for i, dd in df.groupby("id"):
        # Keep only the `TR:` cue columns and strip the prefix.
        cues = dd.drop([c for c in cols if not c.startswith("TR:")] + ["id"], axis=1).reset_index(drop=True)
        cues.columns = [c[3:] for c in cues.columns]

        plt.figure(figsize=(max((len(cues.columns)-1) * .5, 4), max(len(cues)*0.6, 2.5)))
        ax = sns.heatmap(cues, linewidths=.5, cbar=False, cmap=bw, linecolor='gray')
        # Color tick labels: inhibitors (names ending in 'i') red, stimuli green.
        _ = [t.set_color('r') if t.get_text().endswith('i') else t.set_color('g') for t in ax.xaxis.get_ticklabels()]

        ax.set_xlabel("Stimuli (green) and Inhibitors (red)")
        ax.set_ylabel("Experimental condition")

        plt.tight_layout()
        axes.append(ax)

        if filepath:
            plt.savefig(os.path.join(filepath, 'design-%s.pdf' % i))

    return axes
python
{ "resource": "" }
q42564
differences_distribution
train
def differences_distribution(df, filepath=None):
    """
    For each experimental design it plot all the corresponding generated differences in different plots

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `id`, `pairs`, and starting with `DIF:`

    filepath: str
        Absolute path to a folder where to write the plots

    Returns
    -------
    list
        Generated plots


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    axes = []
    cols = df.columns
    for i, dd in df.groupby("id"):
        # One color per experimental condition, shared by both plots.
        palette = sns.color_palette("Set1", len(dd))

        # First plot: stacked pairwise differences per readout.
        plt.figure()
        readouts = dd.drop([c for c in cols if not c.startswith("DIF:")] + ["id"], axis=1).reset_index(drop=True)
        readouts.columns = [c[4:] for c in readouts.columns]

        ax1 = readouts.T.plot(kind='bar', stacked=True, color=palette)
        ax1.set_xlabel("Readout")
        ax1.set_ylabel("Pairwise differences")
        plt.tight_layout()

        if filepath:
            plt.savefig(os.path.join(filepath, 'design-%s-readouts.pdf' % i))

        # Second plot: pairs of behaviors per experimental condition.
        plt.figure()
        behaviors = dd[["pairs"]].reset_index(drop=True)
        ax2 = behaviors.plot.bar(color=palette, legend=False)
        ax2.set_xlabel("Experimental condition")
        ax2.set_ylabel("Pairs of input-output behaviors")
        plt.tight_layout()

        if filepath:
            plt.savefig(os.path.join(filepath, 'design-%s-behaviors.pdf' % i))

        axes.append((ax1, ax2))

    return axes
python
{ "resource": "" }
q42565
predictions_variance
train
def predictions_variance(df, filepath=None):
    """
    Plots the mean variance prediction for each readout

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns starting with `VAR:`

    filepath: str
        Absolute path to a folder where to write the plots

    Returns
    -------
    plot
        Generated plot


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    # Keep only the variance columns, average over conditions, and strip the
    # 'VAR:' prefix to obtain readable readout names.
    df = df.filter(regex="^VAR:")
    by_readout = df.mean(axis=0).reset_index(level=0)
    by_readout.columns = ['Readout', 'Prediction variance (mean)']
    by_readout['Readout'] = by_readout.Readout.map(lambda n: n[4:])

    g1 = sns.factorplot(x='Readout', y='Prediction variance (mean)', data=by_readout, kind='bar', aspect=2)
    for tick in g1.ax.get_xticklabels():
        tick.set_rotation(90)

    if filepath:
        g1.savefig(os.path.join(filepath, 'predictions-variance.pdf'))

    return g1
python
{ "resource": "" }
q42566
intervention_strategies
train
def intervention_strategies(df, filepath=None):
    """
    Plots all intervention strategies

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns starting with `TR:`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    plot
        Generated plot


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    logger = logging.getLogger("caspo")

    # Plotting more than LIMIT strategies is unreadable: warn and subsample.
    LIMIT = 50
    if len(df) > LIMIT:
        msg = "Too many intervention strategies to visualize. A sample of %s strategies will be considered." % LIMIT
        logger.warning(msg)
        df = df.sample(LIMIT)

    # Choose the colormap according to the values present:
    # -1 -> red (inhibition), 0 -> white (none), 1 -> green (activation).
    values = np.unique(df.values.flatten())
    if len(values) == 3:
        rwg = matplotlib.colors.ListedColormap(['red', 'white', 'green'])
    elif 1 in values:
        rwg = matplotlib.colors.ListedColormap(['white', 'green'])
    else:
        rwg = matplotlib.colors.ListedColormap(['red', 'white'])

    plt.figure(figsize=(max((len(df.columns)-1) * .5, 4), max(len(df)*0.6, 2.5)))
    # Strip the 'TR:' prefix from species names.
    df.columns = [c[3:] for c in df.columns]
    ax = sns.heatmap(df, linewidths=.5, cbar=False, cmap=rwg, linecolor='gray')
    ax.set_xlabel("Species")
    ax.set_ylabel("Intervention strategy")

    for tick in ax.get_xticklabels():
        tick.set_rotation(90)

    plt.tight_layout()
    if filepath:
        plt.savefig(os.path.join(filepath, 'strategies.pdf'))

    return ax
python
{ "resource": "" }
q42567
interventions_frequency
train
def interventions_frequency(df, filepath=None):
    """
    Plots the frequency of occurrence for each intervention

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `frequency` and `intervention`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    plot
        Generated plot


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    df = df.sort_values('frequency')
    # Bucket frequencies into three confidence bands used for hue:
    # 0 = rare (<0.2), 1 = middle, 2 = frequent (>=0.8).
    df['conf'] = df.frequency.map(lambda f: 0 if f < 0.2 else 1 if f < 0.8 else 2)

    g = sns.factorplot(x="intervention", y="frequency", data=df, aspect=3, hue='conf', legend=False)
    for tick in g.ax.get_xticklabels():
        tick.set_rotation(90)

    # Color tick labels: inhibitions (names ending in '-1') red, others green.
    _ = [t.set_color('r') if t.get_text().endswith('-1') else t.set_color('g') for t in g.ax.xaxis.get_ticklabels()]

    g.ax.set_ylim([-.05, 1.05])
    g.ax.set_xlabel("Intervention")
    g.ax.set_ylabel("Frequency")

    if filepath:
        g.savefig(os.path.join(filepath, 'interventions-frequency.pdf'))

    return g
python
{ "resource": "" }
q42568
Report.add_graph
train
def add_graph(
    self,
    y,
    x_label=None,
    y_label="",
    title="",
    x_run=None,
    y_run=None,
    svg_size_px=None,
    key_position="bottom right",
):
    """
    Add a new graph to the overlap report.

    Args:
        y (str): Value plotted on y-axis.
        x_label (str): Label on x-axis.
        y_label (str): Label on y-axis.
        title (str): Title of the plot.
        x_run ((float,float)): x-range.
        y_run ((int,int)): y-rang.
        svg_size_px ((int,int): Size of SVG image in pixels.
        key_position (str): GnuPlot position of the legend.
    """
    # Fall back to the report-wide defaults for any unspecified range/size.
    if x_run is None:
        x_run = self.default_x_run
    if y_run is None:
        y_run = self.default_y_run
    if svg_size_px is None:
        svg_size_px = self.default_svg_size_px

    for panel in self.panels:
        # NOTE(review): these _load_* conversions look panel-invariant and
        # could be hoisted above the loop — confirm the loaders are
        # idempotent before moving them.
        x_run = self._load_x_run(x_run)
        y_run = self._load_y_run(y_run)
        svg_size_px = self._load_svg_size_px(svg_size_px)

        panel.add_graph(
            y=y,
            x_run=x_run,
            y_run=y_run,
            svg_size_px=svg_size_px,
            y_label=y_label,
            x_label=x_label if x_label is not None else self.default_x_label,
            title=title,
            key_position=key_position,
        )
python
{ "resource": "" }
q42569
Report.clean
train
def clean(self):
    """Remove all temporary files.

    Recursively deletes the report directory and the generated HTML file.
    """
    # NOTE(review): the paths are interpolated into a shell command; a path
    # containing a double quote would break out of the quoting. Consider a
    # non-shell removal (shutil.rmtree / os.remove) if paths are untrusted.
    rnftools.utils.shell('rm -fR "{}" "{}"'.format(self.report_dir, self._html_fn))
python
{ "resource": "" }
q42570
Traceback.generate_plaintext_traceback
train
def generate_plaintext_traceback(self):
    """Like the plaintext attribute but returns a generator

    Yields one text line at a time: the header, then a File/line pair per
    frame, and finally the exception message.
    """
    yield text_('Traceback (most recent call last):')
    for frame in self.frames:
        # NOTE(review): standard CPython tracebacks indent 'File' with two
        # spaces and the source line with four — the single spaces here may
        # be an artifact of whitespace-mangled source; verify against the
        # upstream original.
        yield text_(' File "%s", line %s, in %s' % (
            frame.filename,
            frame.lineno,
            frame.function_name
        ))
        yield text_(' ' + frame.current_line.strip())
    yield text_(self.exception)
python
{ "resource": "" }
q42571
Frame.render_source
train
def render_source(self):
    """Render the sourcecode.

    Renders every annotated source line and splices the joined result into
    the HTML source-table template.
    """
    rendered_lines = [line.render() for line in self.get_annotated_lines()]
    return SOURCE_TABLE_HTML % text_('\n'.join(rendered_lines))
python
{ "resource": "" }
q42572
DwgSim.recode_dwgsim_reads
train
def recode_dwgsim_reads(
    dwgsim_prefix,
    fastq_rnf_fo,
    fai_fo,
    genome_id,
    estimate_unknown_values,
    number_of_read_tuples=10**9,
):
    """Convert DwgSim FASTQ file to RNF FASTQ file.

    Args:
        dwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).
        fastq_rnf_fo (file): File object of RNF FASTQ.
        fai_fo (file): File object for FAI file of the reference genome.
        genome_id (int): RNF genome ID to be used.
        estimate_unknown_values (bool): Estimate unknown values (right coordinate of each end).
        number_of_read_tuples (int): Estimate of number of simulated read tuples (to set width).
    """

    # Read-name pattern produced by DwgSim; the numbered groups are documented below.
    dwgsim_pattern = re.compile(
        '@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)'
    )
    ###
    # DWGSIM read name format
    #
    # 1)  contig name (chromsome name)
    # 2)  start end 1 (one-based)
    # 3)  start end 2 (one-based)
    # 4)  strand end 1 (0 - forward, 1 - reverse)
    # 5)  strand end 2 (0 - forward, 1 - reverse)
    # 6)  random read end 1 (0 - from the mutated reference, 1 - random)
    # 7)  random read end 2 (0 - from the mutated reference, 1 - random)
    # 8)  number of sequencing errors end 1 (color errors for colorspace)
    # 9)  number of SNPs end 1
    # 10) number of indels end 1
    # 11) number of sequencing errors end 2 (color errors for colorspace)
    # 12) number of SNPs end 2
    # 13) number of indels end 2
    # 14) read number (unique within a given contig/chromosome)
    ###

    fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
    # Width (in hex digits) needed to encode the largest expected read-tuple id.
    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))

    # parsing FQ file
    read_tuple_id = 0
    last_read_tuple_name = None
    old_fq = "{}.bfast.fastq".format(dwgsim_prefix)

    fq_creator = rnftools.rnfformat.FqCreator(
        fastq_fo=fastq_rnf_fo,
        read_tuple_id_width=read_tuple_id_width,
        genome_id_width=2,
        chr_id_width=fai_index.chr_id_width,
        coor_width=fai_index.coor_width,
        info_reads_in_tuple=True,
        info_simulator="dwgsim",
    )

    # FASTQ records are 4 lines long; i % 4 selects the line kind
    # (0: name, 1: bases, 2: separator, 3: qualities).
    i = 0
    with open(old_fq, "r+") as f1:
        for line in f1:
            if i % 4 == 0:
                read_tuple_name = line[1:].strip()
                # Both ends of a pair share the same DwgSim read name, so a
                # name change starts a new tuple (and bumps the RNF tuple id,
                # except for the very first record).
                if read_tuple_name != last_read_tuple_name:
                    new_tuple = True
                    if last_read_tuple_name is not None:
                        read_tuple_id += 1
                else:
                    new_tuple = False
                last_read_tuple_name = read_tuple_name

                m = dwgsim_pattern.search(line)
                if m is None:
                    rnftools.utils.error(
                        "Read tuple '{}' was not created by DwgSim.".format(line[1:]),
                        program="RNFtools",
                        subprogram="MIShmash",
                        exception=ValueError,
                    )

                contig_name = m.group(1)
                start_1 = int(m.group(2))
                start_2 = int(m.group(3))
                direction_1 = "F" if int(m.group(4)) == 0 else "R"
                direction_2 = "F" if int(m.group(5)) == 0 else "R"
                # random_1 = bool(m.group(6))
                # random_2 = bool(m.group(7))
                # seq_err_1 = int(m.group(8))
                # snp_1 = int(m.group(9))
                # indels_1 = int(m.group(10))
                # seq_err_2 = int(m.group(11))
                # snp_2 = int(m.group(12))
                # indels_2 = int(m.group(13))
                # read_tuple_id_dwg = int(m.group(14), 16)

                # Fall back to chromosome id "0" when the FAI index is empty.
                chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else "0"

            elif i % 4 == 1:
                bases = line.strip()
                # The first record of a tuple carries end 1, the second end 2.
                # The right coordinate is only known when it can be estimated
                # from the read length; otherwise it is reported as 0 (unknown).
                if new_tuple:
                    segment = rnftools.rnfformat.Segment(
                        genome_id=genome_id,
                        chr_id=chr_id,
                        direction=direction_1,
                        left=start_1,
                        right=start_1 + len(bases) - 1 if estimate_unknown_values else 0,
                    )
                else:
                    segment = rnftools.rnfformat.Segment(
                        genome_id=genome_id,
                        chr_id=chr_id,
                        direction=direction_2,
                        left=start_2,
                        right=start_2 + len(bases) - 1 if estimate_unknown_values else 0,
                    )

            elif i % 4 == 2:
                pass

            elif i % 4 == 3:
                qualities = line.strip()
                fq_creator.add_read(
                    read_tuple_id=read_tuple_id,
                    bases=bases,
                    qualities=qualities,
                    segments=[segment],
                )
            i += 1

    # Emit the last buffered tuple.
    fq_creator.flush_read_tuple()
python
{ "resource": "" }
q42573
task
train
def task(func):
    """Decorator that runs the decorated function inside a newly spawned Task."""
    def task_wrapper(*args, **kwargs):
        # Defer the actual call to spawn(), which schedules it as a Task.
        return spawn(func, *args, **kwargs)
    return task_wrapper
python
{ "resource": "" }
q42574
Task.join
train
def join(self, timeout=None):
    """Block until this Task finishes.

    If *timeout* is given, give up waiting once it expires and return anyway.
    Raises RuntimeError when the task was never started.
    """
    if self._started:
        return self._exit_event.wait(timeout)
    raise RuntimeError('cannot join task before it is started')
python
{ "resource": "" }
q42575
LogicalNetworkList.reset
train
def reset(self):
    """Empty the list: discard every stored network and the underlying matrix."""
    self.__networks = np.array([])
    self.__matrix = np.array([])
python
{ "resource": "" }
q42576
LogicalNetworkList.split
train
def split(self, indices):
    """
    Splits logical networks according to given indices

    Parameters
    ----------
    indices : list
        1-D array of sorted integers, the entries indicate where the array is split

    Returns
    -------
    list
        List of :class:`caspo.core.logicalnetwork.LogicalNetworkList` object instances


    .. seealso:: `numpy.split <http://docs.scipy.org/doc/numpy/reference/generated/numpy.split.html#numpy-split>`_
    """
    chunks = np.split(self.__matrix, indices)
    return [LogicalNetworkList(self.hg, chunk) for chunk in chunks]
python
{ "resource": "" }
q42577
LogicalNetworkList.to_funset
train
def to_funset(self):
    """
    Converts the list of logical networks to a set of `gringo.Fun`_ instances

    Returns
    -------
    set
        Representation of all networks as a set of `gringo.Fun`_ instances


    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    facts = set(gringo.Fun("variable", [node]) for node in self.hg.nodes)

    # Collect the distinct formulas appearing anywhere in the list and index them.
    unique_formulas = set()
    for network in self:
        for _, formula in network.formulas_iter():
            unique_formulas.add(formula)
    formulas = pd.Series(list(unique_formulas))

    # Tie each network to the indexed formula it assigns to each variable.
    for i, network in enumerate(self):
        for v, f in network.formulas_iter():
            facts.add(gringo.Fun("formula", [i, v, formulas[formulas == f].index[0]]))

    # Describe each formula as a disjunction of indexed clauses.
    for formula_idx, formula in formulas.iteritems():
        for clause in formula:
            clause_idx = self.hg.clauses_idx[clause]
            facts.add(gringo.Fun("dnf", [formula_idx, clause_idx]))
            for variable, sign in clause:
                facts.add(gringo.Fun("clause", [clause_idx, variable, sign]))

    return facts
python
{ "resource": "" }
q42578
LogicalNetworkList.to_dataframe
train
def to_dataframe(self, networks=False, dataset=None, size=False, n_jobs=-1):
    """
    Converts the list of logical networks to a `pandas.DataFrame`_ object instance

    Parameters
    ----------
    networks : boolean
        If True, a column with number of networks having the same behavior is included in the DataFrame

    dataset: Optional[:class:`caspo.core.dataset.Dataset`]
        If not None, a column with the MSE with respect to the given dataset is included in the DataFrame

    size: boolean
        If True, a column with the size of each logical network is included in the DataFrame

    n_jobs : int
        Number of jobs to run in parallel. Default to -1 (all cores available)

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame representation of the list of logical networks.


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    length = len(self)

    # One column per conjunction mapping in the hypergraph; a cell is the
    # 0/1 entry of the membership matrix for that (network, mapping) pair.
    df = pd.DataFrame(self.__matrix, columns=map(str, self.hg.mappings))
    if networks:
        df = pd.concat([df, pd.DataFrame({'networks': self.__networks})], axis=1)

    if dataset is not None:
        clampings = dataset.clampings
        readouts = dataset.readouts.columns
        observations = dataset.readouts.values
        # Boolean mask of observed (non-NaN) readout values; the MSE is
        # computed over these entries only.
        pos = ~np.isnan(observations)

        # MSE of each network against the dataset, computed in parallel.
        mse = Parallel(n_jobs=n_jobs)(delayed(__parallel_mse__)(n, clampings, readouts, observations[pos], pos) for n in self)

        df = pd.concat([df, pd.DataFrame({'mse': mse})], axis=1)

    if size:
        df = pd.concat([df, pd.DataFrame({'size': np.fromiter((n.size for n in self), int, length)})], axis=1)

    return df
python
{ "resource": "" }
q42579
LogicalNetworkList.to_csv
train
def to_csv(self, filename, networks=False, dataset=None, size=False, n_jobs=-1):
    """
    Writes the list of logical networks to a CSV file

    Parameters
    ----------
    filename : str
        Absolute path where to write the CSV file

    networks : boolean
        If True, a column with number of networks having the same behavior is included in the file

    dataset: Optional[:class:`caspo.core.dataset.Dataset`]
        If not None, a column with the MSE with respect to the given dataset is included

    size: boolean
        If True, a column with the size of each logical network is included

    n_jobs : int
        Number of jobs to run in parallel. Default to -1 (all cores available)
    """
    frame = self.to_dataframe(networks, dataset, size, n_jobs)
    frame.to_csv(filename, index=False)
python
{ "resource": "" }
q42580
LogicalNetworkList.frequencies_iter
train
def frequencies_iter(self):
    """
    Iterates over all non-zero frequencies of logical conjunction mappings in this list

    Yields
    ------
    tuple[caspo.core.mapping.Mapping, float]
        The next pair (mapping,frequency)
    """
    # Column-wise mean of the 0/1 membership matrix = frequency per mapping.
    mean_per_mapping = self.__matrix.mean(axis=0)
    for idx, mapping in self.mappings.iteritems():
        yield mapping, mean_per_mapping[idx]
python
{ "resource": "" }
q42581
LogicalNetworkList.predictions
train
def predictions(self, setup, n_jobs=-1):
    """
    Returns a `pandas.DataFrame`_ with the weighted average predictions and variance of all readouts
    for each possible clampings in the given experimental setup. For each logical network the weight
    corresponds to the number of networks having the same behavior.

    Parameters
    ----------
    setup : :class:`caspo.core.setup.Setup`
        Experimental setup

    n_jobs : int
        Number of jobs to run in parallel. Default to -1 (all cores available)

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame with the weighted average predictions and variance of all readouts for each possible clamping


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe

    .. seealso:: `Wikipedia: Weighted sample variance <https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance>`_
    """
    stimuli, inhibitors, readouts = setup.stimuli, setup.inhibitors, setup.readouts
    nc = len(setup.cues())

    # 3-D tensor: one 2-D prediction matrix per network; each matrix has one
    # row per clamping (2^nc of them) and one column per cue then per readout.
    predictions = np.zeros((len(self), 2**nc, len(setup)))
    predictions[:, :, :] = Parallel(n_jobs=n_jobs)(delayed(__parallel_predictions__)(n, list(setup.clampings_iter(setup.cues())), readouts, stimuli, inhibitors) for n in self)

    # Weighted mean and (biased) weighted variance of the readout columns
    # across networks; weights are the per-behavior network counts.
    avg = np.average(predictions[:, :, nc:], axis=0, weights=self.__networks)
    var = np.average((predictions[:, :, nc:]-avg)**2, axis=0, weights=self.__networks)

    rcues = ["TR:%s" % c for c in setup.cues(True)]
    cols = np.concatenate([rcues, ["AVG:%s" % r for r in readouts], ["VAR:%s" % r for r in readouts]])

    #use the first network predictions to extract all clampings
    df = pd.DataFrame(np.concatenate([predictions[0, :, :nc], avg, var], axis=1), columns=cols)
    df[rcues] = df[rcues].astype(int)

    return df
python
{ "resource": "" }
q42582
LogicalNetwork.to_graph
train
def to_graph(self):
    """
    Converts the logical network to its underlying interaction graph

    Returns
    -------
    caspo.core.graph.Graph
        The underlying interaction graph
    """
    signed_edges = set()
    for clause, target in self.edges_iter():
        # Every literal of the clause contributes one signed edge to the target.
        for source, signature in clause:
            signed_edges.add((source, target, signature))
    return Graph.from_tuples(signed_edges)
python
{ "resource": "" }
q42583
LogicalNetwork.step
train
def step(self, state, clamping):
    """
    Performs a simulation step from the given state and with respect to the given clamping

    Parameters
    ----------
    state : dict
        The key-value mapping describing the current state of the logical network

    clamping : caspo.core.clamping.Clamping
        A clamping over variables in the logical network

    Returns
    -------
    dict
        The key-value mapping describing the next state of the logical network
    """
    successor = state.copy()
    for var in state:
        # Clamped variables are forced to their clamped value.
        if clamping.has_variable(var):
            successor[var] = int(clamping.bool(var))
            continue

        # OR over all incoming clauses, short-circuiting on the first true one.
        value = 0
        for clause, _ in self.in_edges_iter(var):
            value = value or clause.bool(state)
            if value:
                break
        successor[var] = int(value)
    return successor
python
{ "resource": "" }
q42584
LogicalNetwork.predictions
train
def predictions(self, clampings, readouts, stimuli=None, inhibitors=None, nclampings=-1):
    """
    Computes network predictions for the given iterable of clampings

    Parameters
    ----------
    clampings : iterable
        Iterable over clampings

    readouts : list[str]
        List of readouts names

    stimuli : Optional[list[str]]
        List of stimuli names

    inhibitors : Optional[list[str]]
        List of inhibitors names

    nclampings : Optional[int]
        If greater than zero, it must be the number of clampings in the iterable.
        Otherwise, clampings must implement the special method :func:`__len__`

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame with network predictions for each clamping. If stimuli and inhibitors
        are given, columns are included describing each clamping. Otherwise, columns
        correspond to readouts only.


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    stimuli, inhibitors = stimuli or [], inhibitors or []
    cues = stimuli + inhibitors
    nc = len(cues)
    ns = len(stimuli)
    # One row per clamping: cue columns first, then one column per readout.
    predictions = np.zeros((nclampings if nclampings > 0 else len(clampings), nc+len(readouts)), dtype=np.int8)
    for i, clamping in enumerate(clampings):
        if nc > 0:
            arr = clamping.to_array(cues)
            # Recode clamping values for reporting: -1 entries become 0 in
            # stimuli columns and 1 in inhibitor columns.
            arr[np.where(arr[:ns] == -1)[0]] = 0
            arr[ns + np.where(arr[ns:] == -1)[0]] = 1
            predictions[i, :nc] = arr

        # Readout values are read off the fixpoint reached under this clamping;
        # readouts absent from the fixpoint default to 0.
        fixpoint = self.fixpoint(clamping)
        for j, readout in enumerate(readouts):
            predictions[i, nc+j] = fixpoint.get(readout, 0)

    return pd.DataFrame(predictions, columns=np.concatenate([stimuli, [i+'i' for i in inhibitors], readouts]))
python
{ "resource": "" }
q42585
LogicalNetwork.variables
train
def variables(self):
    """
    Returns variables in the logical network

    Returns
    -------
    set[str]
        Unique variables names
    """
    names = set()
    for node in self.nodes_iter():
        # Clause nodes contribute the variables of their literals;
        # any other node is itself a variable name.
        if isinstance(node, Clause):
            for literal in node:
                names.add(literal.variable)
        else:
            names.add(node)
    return names
python
{ "resource": "" }
q42586
LogicalNetwork.formulas_iter
train
def formulas_iter(self):
    """
    Iterates over all variable-clauses in the logical network

    Yields
    ------
    tuple[str,frozenset[caspo.core.clause.Clause]]
        The next tuple of the form (variable, set of clauses) in the logical network.
    """
    # Only variables that are actual nodes of the network have a formula;
    # its clauses are the variable's predecessor nodes.
    for var in (v for v in self.variables() if self.has_node(v)):
        yield var, frozenset(self.predecessors(var))
python
{ "resource": "" }
q42587
QuizSubmissionQuestionsAPI.answering_questions
train
def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None):
    """
    Answering questions.

    Provide or update an answer to one or more QuizQuestions.

    :param attempt: attempt number of the quiz submission being taken (must be
        the latest attempt; earlier attempts can not be modified)
    :param validation_token: unique validation token received when the Quiz
        Submission was created
    :param quiz_submission_id: id of the quiz submission
    :param access_code: optional access code for the Quiz
    :param quiz_questions: optional set of question IDs and answer values
    """
    path = {"quiz_submission_id": quiz_submission_id}
    params = {}
    data = {
        "attempt": attempt,
        "validation_token": validation_token,
    }
    # Optional fields are only sent when explicitly provided.
    if access_code is not None:
        data["access_code"] = access_code
    if quiz_questions is not None:
        data["quiz_questions"] = quiz_questions

    self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42588
QuizSubmissionQuestionsAPI.unflagging_question
train
def unflagging_question(self, id, attempt, validation_token, quiz_submission_id, access_code=None):
    """
    Unflagging a question.

    Remove the flag that you previously set on a quiz question after you've
    returned to it.

    :param id: id of the question to unflag
    :param attempt: attempt number of the quiz submission being taken (must be
        the latest attempt; earlier attempts can not be modified)
    :param validation_token: unique validation token received when the Quiz
        Submission was created
    :param quiz_submission_id: id of the quiz submission
    :param access_code: optional access code for the Quiz
    """
    path = {
        "quiz_submission_id": quiz_submission_id,
        "id": id,
    }
    params = {}
    data = {
        "attempt": attempt,
        "validation_token": validation_token,
    }
    # The access code is only sent when explicitly provided.
    if access_code is not None:
        data["access_code"] = access_code

    self.logger.debug("PUT /api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42589
ClampingList.to_funset
train
def to_funset(self, lname="clamping", cname="clamped"):
    """
    Converts the list of clampings to a set of `gringo.Fun`_ instances

    Parameters
    ----------
    lname : str
        Predicate name for the clamping id

    cname : str
        Predicate name for the clamped variable

    Returns
    -------
    set
        Representation of all clampings as a set of `gringo.Fun`_ instances


    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    facts = set()
    for idx, clamping in enumerate(self):
        # One fact identifying the clamping plus one fact per clamped variable.
        facts.add(gringo.Fun(lname, [idx]))
        facts |= clamping.to_funset(idx, cname)
    return facts
python
{ "resource": "" }
q42590
ClampingList.to_dataframe
train
def to_dataframe(self, stimuli=None, inhibitors=None, prepend=""):
    """
    Converts the list of clampigns to a `pandas.DataFrame`_ object instance

    Parameters
    ----------
    stimuli : Optional[list[str]]
        List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.

    inhibitors : Optional[list[str]]
        List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.

    prepend : str
        Columns are renamed using the given string at the beginning

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame representation of the list of clampings


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    stimuli, inhibitors = stimuli or [], inhibitors or []
    cues = stimuli + inhibitors
    nc = len(cues)
    ns = len(stimuli)

    # With no cues given, fall back to every variable mentioned in any clamping.
    variables = cues or np.array(list(set((v for (v, s) in it.chain.from_iterable(self)))))

    rows = []
    for clamping in self:
        arr = clamping.to_array(variables)
        if nc > 0:
            # Recode stimuli (-1 -> 0) and inhibitors (-1 -> 1) for reporting.
            arr[np.where(arr[:ns] == -1)[0]] = 0
            arr[ns + np.where(arr[ns:] == -1)[0]] = 1
        rows.append(arr)

    # Build the matrix in one shot instead of the previous per-row np.append,
    # which re-allocated the whole matrix on every iteration (O(n^2)).
    matrix = np.array(rows) if rows else np.array([])

    return pd.DataFrame(matrix, columns=[prepend + "%s" % c for c in (stimuli + [i + 'i' for i in inhibitors] if nc > 0 else variables)])
python
{ "resource": "" }
q42591
ClampingList.to_csv
train
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""):
    """
    Writes the list of clampings to a CSV file

    Parameters
    ----------
    filename : str
        Absolute path where to write the CSV file

    stimuli : Optional[list[str]]
        List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.

    inhibitors : Optional[list[str]]
        List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.

    prepend : str
        Columns are renamed using the given string at the beginning
    """
    frame = self.to_dataframe(stimuli, inhibitors, prepend)
    frame.to_csv(filename, index=False)
python
{ "resource": "" }
q42592
ClampingList.frequencies_iter
train
def frequencies_iter(self):
    """
    Iterates over the frequencies of all clamped variables

    Yields
    ------
    tuple[ caspo.core.literal.Literal, float ]
        The next tuple of the form (literal, frequency)
    """
    df = self.to_dataframe()
    total = float(len(self))
    # Consider both possible signs of every clamped variable; skip those
    # never occurring in the list.
    for var, sign in it.product(df.columns, [-1, 1]):
        freq = len(df[df[var] == sign]) / total
        if freq > 0:
            yield Literal(var, sign), freq
python
{ "resource": "" }
q42593
ClampingList.frequency
train
def frequency(self, literal):
    """
    Returns the frequency of a clamped variable

    Parameters
    ----------
    literal : :class:`caspo.core.literal.Literal`
        The clamped variable

    Returns
    -------
    float
        The frequency of the given literal

    Raises
    ------
    ValueError
        If the variable is not present in any of the clampings
    """
    df = self.to_dataframe()
    if literal.variable not in df.columns:
        raise ValueError("Variable not found: %s" % literal.variable)
    matches = df[df[literal.variable] == literal.signature]
    return len(matches) / float(len(self))
python
{ "resource": "" }
q42594
ClampingList.differences
train
def differences(self, networks, readouts, prepend=""):
    """
    Returns the total number of pairwise differences over the given readouts for the given networks

    Parameters
    ----------
    networks : iterable[:class:`caspo.core.logicalnetwork.LogicalNetwork`]
        Iterable of logical networks to compute pairwise differences

    readouts : list[str]
        List of readouts species names

    prepend : str
        Columns are renamed using the given string at the beginning

    Returns
    -------
    `pandas.DataFrame`_
        Total number of pairwise differences for each clamping over each readout


    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    # z[clamping, readout] counts pairs of networks disagreeing on that cell;
    # p[clamping] counts pairs disagreeing on at least one readout there.
    z, p = np.zeros((len(self), len(readouts)), dtype=int), np.zeros(len(self), dtype=int)
    for n1, n2 in it.combinations(networks, 2):
        # Positions (clamping row, readout column) where the two networks'
        # predictions disagree.
        r, c = np.where(n1.predictions(self, readouts) != n2.predictions(self, readouts))
        z[r, c] += 1
        # NOTE(review): with duplicate row indices in r, numpy's fancy-index
        # `+=` increments each row at most once per pair — presumably intended,
        # so 'pairs' counts disagreeing pairs rather than disagreeing cells.
        p[r] += 1

    df = pd.DataFrame(z, columns=[prepend + "%s" % c for c in readouts])
    return pd.concat([df, pd.Series(p, name='pairs')], axis=1)
python
{ "resource": "" }
q42595
ClampingList.drop_literals
train
def drop_literals(self, literals):
    """
    Returns a new list of clampings without the given literals

    Parameters
    ----------
    literals : iterable[:class:`caspo.core.literal.Literal`]
        Iterable of literals to be removed from each clamping

    Returns
    -------
    caspo.core.clamping.ClampingList
        The new list of clampings
    """
    kept = []
    for clamping in self:
        reduced = clamping.drop_literals(literals)
        # Clampings emptied by the removal are discarded entirely.
        if len(reduced) > 0:
            kept.append(reduced)
    return ClampingList(kept)
python
{ "resource": "" }
q42596
Clamping.to_funset
train
def to_funset(self, index, name="clamped"):
    """
    Converts the clamping to a set of `gringo.Fun`_ object instances

    Parameters
    ----------
    index : int
        An external identifier to associate several clampings together in ASP

    name : str
        A function name for the clamping

    Returns
    -------
    set
        The set of `gringo.Fun`_ object instances


    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    # One fact per clamped (variable, sign) pair, tagged with the given index.
    return {gringo.Fun(name, [index, variable, sign]) for variable, sign in self}
python
{ "resource": "" }
q42597
Clamping.to_array
train
def to_array(self, variables):
    """
    Converts the clamping to a 1-D array with respect to the given variables

    Parameters
    ----------
    variables : list[str]
        List of variables names

    Returns
    -------
    `numpy.ndarray`_
        1-D array where position `i` correspond to the sign of the clamped variable
        at position `i` in the given list of variables


    .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray
    """
    clamped = dict(self)
    arr = np.zeros(len(variables), np.int8)
    for pos, var in enumerate(variables):
        # Unclamped variables keep the zero default.
        if var in clamped:
            arr[pos] = clamped[var]
    return arr
python
{ "resource": "" }
q42598
CustomGradebookColumnsAPI.create_custom_gradebook_column
train
def create_custom_gradebook_column(self, course_id, column_title, column_hidden=None, column_position=None, column_teacher_notes=None):
    """
    Create a custom gradebook column.

    Create a custom gradebook column

    :param course_id: id of the course
    :param column_title: title of the new column
    :param column_hidden: hidden columns are not displayed in the gradebook
    :param column_position: position of the column relative to other custom columns
    :param column_teacher_notes: set when the column is created by a teacher
        (the gradebook only supports one teacher_notes column)
    """
    path = {"course_id": course_id}
    params = {}
    data = {"column[title]": column_title}
    # Optional column attributes are only sent when explicitly provided.
    if column_position is not None:
        data["column[position]"] = column_position
    if column_hidden is not None:
        data["column[hidden]"] = column_hidden
    if column_teacher_notes is not None:
        data["column[teacher_notes]"] = column_teacher_notes

    self.logger.debug("POST /api/v1/courses/{course_id}/custom_gradebook_columns with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/custom_gradebook_columns".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42599
CustomGradebookColumnsAPI.update_column_data
train
def update_column_data(self, id, user_id, course_id, column_data_content):
    """
    Update column data.

    Set the content of a custom column

    :param id: id of the custom gradebook column
    :param user_id: id of the user whose cell is updated
    :param course_id: id of the course
    :param column_data_content: column content; blank deletes the datum object
    """
    path = {
        "course_id": course_id,
        "id": id,
        "user_id": user_id,
    }
    params = {}
    data = {"column_data[content]": column_data_content}

    self.logger.debug("PUT /api/v1/courses/{course_id}/custom_gradebook_columns/{id}/data/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/custom_gradebook_columns/{id}/data/{user_id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }