_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def instantiate_from_config(cfg):
    """Instantiate data types from config.

    Parameters
    ----------
    cfg : iterable of dict
        Each dict describes one data type; its "type" key names the type.

    Raises
    ------
    KeyError
        If a data type with the same name is already registered.
    """
    for entry in cfg:
        type_name = entry.get("type")
        if type_name in data_types:
            # Report the clashing type name, not the whole config dict.
            raise KeyError("Data type '%s' already exists" % type_name)
        data_types[type_name] = DataType(entry)
def extract_data_from_response(self, response, data_key=None):
    """Return the payload carried by a Canvas API response.

    Canvas endpoints return either a bare JSON list or a JSON object;
    for objects, *data_key* (if given) selects one entry and a missing
    key raises KeyError, as before.

    Raises CanvasAPIError for any other JSON payload type.
    """
    response_json_data = response.json()
    # Seems to be two types of response, a dict with keys and then lists
    # of data or a flat list data with no key.
    # isinstance is preferred over type() equality checks.
    if isinstance(response_json_data, list):
        # Return the data
        return response_json_data
    if isinstance(response_json_data, dict):
        if data_key is None:
            return response_json_data
        return response_json_data[data_key]
    raise CanvasAPIError(response)
def extract_pagination_links(self, response):
    '''Given a wrapped_response from a Canvas API endpoint,
    extract the pagination links from the response headers.

    Returns a dict mapping rel names (e.g. 'next', 'last') to URLs, or
    None when no Link header is present.'''
    try:
        link_header = response.headers['Link']
    except KeyError:
        # logger.warn() is deprecated; logger.warning() is the supported name.
        logger.warning('Unable to find the Link header. Unable to continue with pagination.')
        return None
    split_header = link_header.split(',')
    exploded_split_header = [i.split(';') for i in split_header]
    pagination_links = {}
    for h in exploded_split_header:
        link = h[0]
        rel = h[1]
        # Check that the link format is what we expect
        if link.startswith('<') and link.endswith('>'):
            link = link[1:-1]
        else:
            continue
        # Extract the rel argument
        m = self.rel_matcher.match(rel)
        try:
            rel = m.groups()[0]
        except AttributeError:
            # Match returned None, just skip.
            continue
        except IndexError:
            # Matched but no groups returned
            continue
        pagination_links[rel] = link
    return pagination_links
def generic_request(self, method, uri,
                    all_pages=False,
                    data_key=None,
                    no_data=False,
                    do_not_process=False,
                    force_urlencode_data=False,
                    data=None,
                    params=None,
                    files=None,
                    single_item=False):
    """Generic Canvas Request Method.

    Dispatches an HTTP request through self.session and post-processes
    the response according to the flags:
      - do_not_process: return the raw response object
      - no_data: return only the HTTP status code
      - all_pages: follow pagination via self.depaginate
      - single_item: return response JSON (or its data_key entry)
    """
    # Python 2's `urllib.urlencode` does not exist on Python 3.
    from urllib.parse import urlencode
    if not uri.startswith('http'):
        uri = self.uri_for(uri)
    if force_urlencode_data is True:
        uri += '?' + urlencode(data)
    assert method in ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS']
    if method == 'GET':
        response = self.session.get(uri, params=params)
    elif method == 'POST':
        response = self.session.post(uri, data=data, files=files)
    elif method == 'PUT':
        response = self.session.put(uri, data=data)
    elif method == 'DELETE':
        response = self.session.delete(uri, params=params)
    elif method == 'HEAD':
        response = self.session.head(uri, params=params)
    elif method == 'OPTIONS':
        response = self.session.options(uri, params=params)
    # Raise on 4xx/5xx before any post-processing.
    response.raise_for_status()
    if do_not_process is True:
        return response
    if no_data:
        return response.status_code
    if all_pages:
        return self.depaginate(response, data_key)
    if single_item:
        r = response.json()
        if data_key:
            return r[data_key]
        else:
            return r
    return response.json()
q42304 | BaseCanvasAPI._validate_iso8601_string | train | def _validate_iso8601_string(self, value):
"""Return the value or raise a ValueError if it is not a string in ISO8601 format."""
ISO8601_REGEX = r'(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})([+-](\d{2})\:(\d{2})|Z)'
if re.match(ISO8601_REGEX, value):
return value
else:
raise ValueError('{} must be in ISO8601 format.'.format(value)) | python | {
"resource": ""
} |
def create_database(dbpath, schema='', overwrite=True):
    """
    Create a new SQLite database at the given dbpath.

    Parameters
    ----------
    dbpath: str
        The full path for the new database, including the filename and .db file extension.
    schema: str
        The path to the .sql schema for the database
    overwrite: bool
        Overwrite dbpath if it already exists
    """
    import sqlite3
    if dbpath.endswith('.db'):
        if os.path.isfile(dbpath) and overwrite:
            # os.remove is portable and avoids shelling out to `rm`.
            os.remove(dbpath)
        # Use the sqlite3 module instead of the sqlite3 CLI via os.system:
        # no shell dependency, no quoting/injection issues.
        conn = sqlite3.connect(dbpath)
        try:
            if schema:
                # Load the schema if given
                with open(schema) as f:
                    conn.executescript(f.read())
            else:
                # Otherwise just make an empty SOURCES table
                conn.execute("CREATE TABLE sources (id INTEGER PRIMARY KEY, ra REAL, dec REAL, designation TEXT, "
                             "publication_id INTEGER, shortname TEXT, names TEXT, comments TEXT)")
            conn.commit()
        finally:
            conn.close()
        if os.path.isfile(dbpath):
            print(
                "\nDatabase created! To load, run\n\ndb = astrodb.Database('{}')"
                "\n\nThen run db.modify_table() method to create tables.".format(dbpath))
    else:
        print("Please provide a path and file name with a .db file extension, e.g. /Users/<username>/Desktop/test.db")
def adapt_array(arr):
    """
    Adapts a Numpy array into a binary blob to put into the database.

    Parameters
    ----------
    arr: array
        The Numpy array to be adapted into an ARRAY type that can be inserted into a SQL file.

    Returns
    -------
    sqlite3.Binary
        The adapted array object
    """
    import sqlite3
    out = io.BytesIO()
    np.save(out, arr)
    out.seek(0)
    # `buffer` only exists on Python 2; sqlite3.Binary works on 2 and 3.
    return sqlite3.Binary(out.read())
def convert_array(array):
    """
    Convert a stored ARRAY blob back into a Numpy array.

    Parameters
    ----------
    array: ARRAY
        The array object to be converted back into a Numpy array.

    Returns
    -------
    array
        The converted Numpy array.
    """
    stream = io.BytesIO(array)
    stream.seek(0)
    return np.load(stream)
q42308 | convert_image | train | def convert_image(File, verbose=False):
    """
    Converts a IMAGE data type stored in the database into a data cube
    Parameters
    ----------
    File: str
        The URL or filepath of the file to be converted into arrays.
    verbose: bool
        Whether or not to display some diagnostic information (Default: False)
    Returns
    -------
    sequence
        The converted image
    """
    # NOTE(review): `pf`, `ii`, `download_file` and `Image` come from module-level
    # imports not visible here -- presumably astropy.io.fits, astropy.io.ascii,
    # astropy.utils.data and a local Image class; confirm against the full module.
    image, header = '', ''
    if isinstance(File, type(b'')): # Decode if needed (ie, for Python 3)
        File = File.decode('utf-8')
    if isinstance(File, (str, type(u''))):
        # Convert variable path to absolute path
        if File.startswith('$'):
            abspath = os.popen('echo {}'.format(File.split('/')[0])).read()[:-1]
            if abspath:
                File = File.replace(File.split('/')[0], abspath)
        if File.startswith('http'):
            if verbose:
                print('Downloading {}'.format(File))
            # Download only once
            downloaded_file = download_file(File, cache=True)
        else:
            downloaded_file = File
        try:
            # Get the data
            image, header = pf.getdata(downloaded_file, cache=True, header=True)
            # If no data, then clear out all retrieved info for object
            if not isinstance(image, np.ndarray):
                image = None
            if verbose:
                print('Read as FITS...')
        except (IOError, KeyError):
            # Check if the FITS file is just Numpy arrays
            try:
                image, header = pf.getdata(downloaded_file, cache=True, header=True)
                if verbose:
                    print('Read as FITS Numpy array...')
            except (IOError, KeyError):
                try: # Try ascii
                    image = ii.read(downloaded_file)
                    image = np.array([np.asarray(image.columns[n]) for n in range(len(image.columns))])
                    if verbose:
                        print('Read as ascii...')
                    # Collect any commented header lines ('#', '|', '\') from the ascii file.
                    txt, header = open(downloaded_file), []
                    for i in txt:
                        if any([i.startswith(char) for char in ['#', '|', '\\']]):
                            header.append(i.replace('\n', ''))
                    txt.close()
                except:
                    # Best-effort reader: fall through with image == '' on failure.
                    pass
    # NOTE(review): when `image` is a numpy array, this `==` comparison relies on
    # numpy returning a scalar False for array-vs-str equality -- confirm on the
    # numpy version in use.
    if image == '':
        print('Could not retrieve image at {}.'.format(File))
        return File
    else:
        image = Image(image, header, File)
    return image | python | {
"resource": ""
} |
q42309 | pprint | train | def pprint(data, names='', title='', formats={}):
    """
    Prints tables with a bit of formatting
    Parameters
    ----------
    data: (sequence, dict, table)
        The data to print in the table
    names: sequence
        The column names
    title: str (optional)
        The title of the table
    formats: dict
        A dictionary of column:format values
    """
    # NOTE(review): `at` and `ii` are presumably astropy.table and astropy.io.ascii
    # module-level imports -- confirm against the full module. Also note the
    # mutable default `formats={}` is updated in place below, so entries persist
    # across calls that share the default.
    # Make the data into a table if it isn't already
    if type(data) != at.Table:
        data = at.Table(data, names=names)
    # Make a copy
    pdata = data.copy()
    # Put the title in the metadata
    try:
        title = title or pdata.meta['name']
    except:
        pass
    # Shorten the column names for slimmer data
    for old, new in zip(*[pdata.colnames, [
        i.replace('wavelength', 'wav').replace('publication', 'pub').replace('instrument', 'inst')\
            .replace('telescope','scope') for i in pdata.colnames]]):
        pdata.rename_column(old, new) if new != old else None
    # Format the columns
    formats.update({'comments': '%.15s', 'obs_date': '%.10s', 'names': '%.30s', 'description': '%.50s'})
    # print it!
    if title: print('\n' + title)
    try:
        ii.write(pdata, sys.stdout, Writer=ii.FixedWidthTwoLine, formats=formats, fill_values=[('None', '-')])
    except UnicodeDecodeError: # Fix for Unicode characters. Print out in close approximation to ii.write()
        # NOTE(review): this fallback is Python-2-only -- str(...).decode('utf-8')
        # and list-style use of map() (`.append`) would themselves fail on Python 3.
        max_length = 50
        str_lengths = dict()
        for key in pdata.keys():
            lengths = map(lambda x: len(str(x).decode('utf-8')), pdata[key].data)
            lengths.append(len(key))
            str_lengths[key] = min(max(lengths), max_length)
        print(' '.join(key.rjust(str_lengths[key]) for key in pdata.keys()))
        print(' '.join('-' * str_lengths[key] for key in pdata.keys()))
        for i in pdata:
            print(' '.join([str(i[key]).decode('utf-8')[:max_length].rjust(str_lengths[key])
                            if i[key] else '-'.rjust(str_lengths[key]) for key in pdata.keys()])) | python | {
"resource": ""
} |
def add_changelog(self, user="", mod_tables="", user_desc=""):
    """
    Record an entry in the changelog table; run this after edits to the database.

    Parameters
    ----------
    user: str
        Name of the person who made the edits
    mod_tables: str
        Table or tables that were edited
    user_desc: str
        A short message describing the changes
    """
    import datetime
    import socket
    # Refuse to log an entry with missing information.
    if user == "" or mod_tables == "" or user_desc == "":
        print("You must supply your name, the name(s) of table(s) edited, "
              "and a description for add_changelog() to work.")
        raise InputError('Did not supply the required input, see help(db.add_changelog) for more information.\n'
                         'Your inputs: \n\t user = {}\n\t mod_tables = {}\n\t user_desc = {}'.format(user, mod_tables, user_desc))
    # Table names are stored uppercase for consistency.
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    row = [stamp, user, socket.gethostname(), mod_tables.upper(), user_desc]
    entry = [['date', 'user', 'machine_name', 'modified_tables', 'user_description'], row]
    self.add_data(entry, 'changelog')
def close(self, silent=False):
    """
    Close the database connection, optionally offering to save the contents
    and to delete the .db file.

    Parameters
    ----------
    silent: bool
        Close quietly without saving or deleting (Default: False).
    """
    if not silent:
        # Offer to dump the contents next to the database first.
        answer = get_input("Save database contents to '{}/'? (y, [n]) \n"
                           "To save elsewhere, run db.save() before closing. ".format(self.directory))
        if answer.lower() == 'y':
            self.save()
        answer = get_input("Do you want to delete {0}? (y,[n]) \n"
                           "Don't worry, a new one will be generated if you run astrodb.Database('{1}') "
                           .format(self.dbpath, self.sqlpath))
        if answer.lower() == 'y':
            print("Deleting {}".format(self.dbpath))
            os.system("rm {}".format(self.dbpath))
    print('Closing connection')
    self.conn.close()
def get_bibtex(self, id, fetch=False, table='publications'):
    """
    Grab bibtex entry from NASA ADS
    Parameters
    ----------
    id: int or str
        The id or shortname from the PUBLICATIONS table to search
    fetch: bool
        Whether or not to return the bibtex string in addition to printing (default: False)
    table: str
        Table name, defaults to publications
    Returns
    -------
    bibtex: str
        If fetch=True, return the bibtex string
    """
    import requests
    bibcode_name = 'bibcode'
    # Integer ids look up by primary key, anything else by shortname.
    if isinstance(id, int):
        bibcode = self.query("SELECT {} FROM {} WHERE id={}".format(bibcode_name, table, id),
                             fetch='one')
    else:
        bibcode = self.query("SELECT {} FROM {} WHERE shortname='{}'".format(bibcode_name, table, id),
                             fetch='one')
    # Check for empty bibcodes
    if bibcode is None:
        print('No bibcode for {}'.format(id))
        return
    bibcode = bibcode[0]
    if bibcode == '':
        print('No bibcode for {}'.format(id))
        return
    # Construct URL and grab data
    url = 'http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode={}&data_type=BIBTEX&db_key=AST&nocookieset=1'.\
        format(bibcode)
    try:
        r = requests.get(url)
    except Exception as ex:
        print('Error accessing url {}'.format(url))
        # `ex.message` only exists on Python 2; print the exception itself.
        print(ex)
        return
    # Check status and display results
    if r.status_code == 200:
        ind = r.content.find(b'@')
        print(r.content[ind:].strip().decode('utf-8'))
        if fetch:
            return r.content[ind:].strip()
    else:
        print('Error getting bibtex')
        return
def info(self):
    """
    Print an inventory of the loaded database: paths, available tables and
    the number of rows in each.
    """
    master = self.query("SELECT * FROM sqlite_master WHERE type='table'", fmt='table')
    table_names = master['name'].tolist()
    print('\nDatabase path: {} \nSQL path: {}\n'.format(self.dbpath, self.sqlpath))
    print('Database Inventory')
    print('==================')
    # 'sources' is always listed first; bookkeeping tables are skipped.
    ordered = ['sources'] + [t for t in table_names if t not in ['sources', 'sqlite_sequence']]
    for table in ordered:
        row_count = self.query('select count() from {}'.format(table), fmt='array', fetch='one')
        if row_count is None:
            continue
        print('{}: {}'.format(table.upper(), row_count[0]))
q42314 | Database._lowest_rowids | train | def _lowest_rowids(self, table, limit):
"""
Gets the lowest available row ids for table insertion. Keeps things tidy!
Parameters
----------
table: str
The name of the table being modified
limit: int
The number of row ids needed
Returns
-------
available: sequence
An array of all available row ids
"""
try:
t = self.query("SELECT id FROM {}".format(table), unpack=True, fmt='table')
ids = t['id']
all_ids = np.array(range(1, max(ids)))
except TypeError:
ids = None
all_ids = np.array(range(1, limit+1))
available = all_ids[np.in1d(all_ids, ids, assume_unique=True, invert=True)][:limit]
# If there aren't enough empty row ids, start using the new ones
if len(available) < limit:
diff = limit - len(available)
available = np.concatenate((available, np.array(range(max(ids) + 1, max(ids) + 1 + diff))))
return available | python | {
"resource": ""
} |
q42315 | Database.output_spectrum | train | def output_spectrum(self, spectrum, filepath, header={}):
    """
    Prints a file of the given spectrum to an ascii file with specified filepath.
    Parameters
    ----------
    spectrum: int, sequence
        The id from the SPECTRA table or a [w,f,e] sequence
    filepath: str
        The path of the file to print the data to.
    header: dict
        A dictionary of metadata to add of update in the header
    """
    # NOTE(review): `Spectrum` and `ii` (presumably astropy.io.ascii) come from
    # module-level imports not visible here -- confirm against the full module.
    # Also note `header={}` is a mutable default argument.
    # If an integer is supplied, get the spectrum from the SPECTRA table
    if isinstance(spectrum, int):
        data = self.query("SELECT * FROM spectra WHERE id={}".format(spectrum), fetch='one', fmt='dict')
        try:
            data['header'] = list(map(list, data['spectrum'].header.cards)) + [[k, v, ''] for k, v in
                                                                               header.items()]
        except:
            data['header'] = ''
    # If a [w,f,e] sequence is supplied, make it into a Spectrum object
    elif isinstance(spectrum, (list, tuple, np.ndarray)):
        data = {'spectrum': Spectrum(spectrum, header=header), 'wavelength_units': '', 'flux_units': ''}
        try:
            data['header'] = list(map(list, data['spectrum'].header.cards))
        except:
            data['header'] = ''
    if data:
        # NOTE(review): a filepath without '.txt' gets 'spectrum.txt' appended
        # directly (no path separator) -- confirm this is intended.
        fn = filepath if filepath.endswith('.txt') else filepath + 'spectrum.txt'
        # Write the header
        if data['header']:
            for n, line in enumerate(data['header']):
                data['header'][n] = ['# {}'.format(str(line[0])).ljust(10)[:10],
                                     '{:50s} / {}'.format(*map(str, line[1:]))]
            try:
                ii.write([np.asarray(i) for i in np.asarray(data['header']).T], fn, delimiter='\t',
                         format='no_header')
            except IOError:
                pass
        # Write the data
        names = ['# wavelength [{}]'.format(data['wavelength_units']), 'flux [{}]'.format(data['flux_units'])]
        if len(data['spectrum'].data) == 3:
            if type(data['spectrum'].data[2]) in [np.ndarray, list]:
                names += ['unc [{}]'.format(data['flux_units'])]
            else:
                # Third element is not an uncertainty array; drop it.
                data['spectrum'].data = data['spectrum'].data[:2]
        with open(fn, mode='a') as f:
            ii.write([np.asarray(i, dtype=np.float64) for i in data['spectrum'].data], f, names=names,
                     delimiter='\t')
    else:
        print("Could not output spectrum: {}".format(spectrum)) | python | {
"resource": ""
} |
def schema(self, table):
    """
    Pretty-print the column schema of a table.

    Parameters
    ----------
    table: str
        The table name
    """
    try:
        table_info = self.query("PRAGMA table_info({})".format(table), fmt='table')
        pprint(table_info)
    except ValueError:
        print('Table {} not found'.format(table))
q42317 | Database.snapshot | train | def snapshot(self, name_db='export.db', version=1.0):
    """
    Function to generate a snapshot of the database by version number.
    Parameters
    ----------
    name_db: string
        Name of the new database (Default: export.db)
    version: float
        Version number to export (Default: 1.0)
    """
    # NOTE(review): relies on the `sqlite3` command-line tool being on PATH
    # (invoked via os.system below) -- confirm for deployment environments.
    # Check if file exists
    if os.path.isfile(name_db):
        import datetime
        # Move the existing export aside with a timestamp suffix.
        date = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")
        print("Renaming existing file {} to {}".format(name_db, name_db.replace('.db', date + '.db')))
        os.system("mv {} {}".format(name_db, name_db.replace('.db', date + '.db')))
    # Create a new database from existing database schema
    t, = self.query("select sql from sqlite_master where type = 'table'", unpack=True)
    schema = ';\n'.join(t) + ';'
    os.system("sqlite3 {} '{}'".format(name_db, schema))
    # Attach database to newly created database
    db = Database(name_db)
    db.list('PRAGMA foreign_keys=OFF') # Temporarily deactivate foreign keys for the snapshot
    db.list("ATTACH DATABASE '{}' AS orig".format(self.dbpath))
    # For each table in database, insert records if they match version number
    t = db.query("SELECT * FROM sqlite_master WHERE type='table'", fmt='table')
    all_tables = t['name'].tolist()
    for table in [t for t in all_tables if t not in ['sqlite_sequence', 'changelog']]:
        # Check if this is table with version column
        metadata = db.query("PRAGMA table_info({})".format(table), fmt='table')
        columns, types, required, pk = [np.array(metadata[n]) for n in ['name', 'type', 'notnull', 'pk']]
        print(table, columns)
        if 'version' not in columns:
            # No versioning: copy the whole table.
            db.modify("INSERT INTO {0} SELECT * FROM orig.{0}".format(table))
        else:
            # Versioned table: copy only rows at or below the requested version.
            db.modify("INSERT INTO {0} SELECT * FROM orig.{0} WHERE orig.{0}.version<={1}".format(table, version))
    # Detach original database
    db.list('DETACH DATABASE orig')
    db.list('PRAGMA foreign_keys=ON')
    db.close(silent=True) | python | {
"resource": ""
} |
def read(path):
    """Read a secret from the Vault REST endpoint and return its 'data' payload."""
    url = '{}/{}/{}'.format(settings.VAULT_BASE_URL.rstrip('/'),
                            settings.VAULT_BASE_SECRET_PATH.strip('/'),
                            path.lstrip('/'))
    resp = requests.get(url, headers={'X-Vault-Token': settings.VAULT_ACCESS_TOKEN})
    if not resp.ok:
        log.error('Failed VAULT GET request: %s %s', resp.status_code, resp.text)
        raise Exception('Failed Vault GET request: {} {}'.format(resp.status_code, resp.text))
    return resp.json()['data']
def getIndexes(self, rabaOnly = True) :
    """Return the list of indexes in the sql database.

    With rabaOnly=True (default) only the indexes created by Raba
    (name starting with 'raba') are returned."""
    rows = self.execute("SELECT * FROM sqlite_master WHERE type='index'")
    if rabaOnly :
        return [row for row in rows if row[1].lower().startswith('raba')]
    return [row for row in rows]
def flushIndexes(self) :
    """Drop every index that was created by Raba."""
    raba_indexes = self.getIndexes(rabaOnly = True)
    for index_row in raba_indexes :
        self.dropIndexByName(index_row[1])
def enableStats(self, bol, logQueries = False) :
    """Enable (bol truthy) or disable query statistics collection.
    With logQueries=True a record of all queries is also kept."""
    self._enableStats = bol
    self._logQueries = logQueries
    if bol :
        # Reset counters and restart the clock when (re-)enabling.
        self._enableStats = True
        self.eraseStats()
        self.startTime = time.time()
def execute(self, sql, values = ()) :
    """Run an sql statement (or append it to the current transaction)
    and return the cursor used."""
    statement = sql.strip()
    self._debugActions(statement, values)
    cursor = self.connection.cursor()
    cursor.execute(statement, values)
    return cursor
def initateSave(self, obj) :
    """Try to initiate a save session. Each object can only be saved once during a session.
    The session begins when a raba object initiates it and ends once that object
    and all of its dependencies have been saved. Returns False if a session is
    already in progress."""
    if self.saveIniator == None :
        self.saveIniator = obj
        return True
    return False
def freeSave(self, obj) :
    """THIS IS WHERE COMMITS TAKE PLACE!
    End a saving session; only the initiator can end it, and never while a
    transaction is open. The commit is performed at the end of the session."""
    if self.saveIniator is not obj or self.inTransaction :
        return False
    self.saveIniator = None
    self.savedObject = set()
    self.connection.commit()
    return True
def registerSave(self, obj) :
    """Mark obj as saved for this session. Returns False if it was already
    registered (each object may only be saved once per session)."""
    rid = obj._runtimeId
    if rid in self.savedObject :
        return False
    self.savedObject.add(rid)
    return True
def getLastRabaId(self, cls) :
    """Register cls among the loaded raba classes and return the next free
    raba_id for its table (0 when the table is empty)."""
    self.loadedRabaClasses[cls.__name__] = cls
    cursor = self.execute('SELECT MAX(raba_id) from %s LIMIT 1' % (cls.__name__))
    row = cursor.fetchone()
    try :
        return int(row[0]) + 1
    except TypeError:
        # MAX() returned NULL: the table has no rows yet.
        return 0
def createTable(self, tableName, strFields) :
    """Create a table. Returns True when the table was created and False
    when it already existed (note: returns a bool, not a cursor)."""
    if self.tableExits(tableName) :
        return False
    self.execute('CREATE TABLE %s ( %s)' % (tableName, strFields))
    self.tables.add(tableName)
    return True
def dropColumnsFromRabaObjTable(self, name, lstFieldsToKeep) :
    """Remove columns from a RabaObj table by rebuilding it with only
    lstFieldsToKeep (which must not contain raba_id or json fields)."""
    if len(lstFieldsToKeep) == 0 :
        raise ValueError("There are no fields to keep")
    tmpTable = name + '_copy'
    keepStr = ', '.join(lstFieldsToKeep)
    # Copy the surviving columns into a fresh table, then swap it in.
    self.createTable(tmpTable, 'raba_id INTEGER PRIMARY KEY AUTOINCREMENT, json, %s' % (keepStr))
    self.execute("INSERT INTO %s SELECT %s FROM %s;" % (tmpTable, 'raba_id, json, %s' % keepStr, name))
    self.dropTable(name)
    self.renameTable(tmpTable, name)
def get_likes(self, offset=0, limit=50):
    """ Get user's likes. """
    raw = self.client.get(
        self.client.USER_LIKES % (self.name, offset, limit))
    return self._parse_response(raw, strack)
def get_tracks(self, offset=0, limit=50):
    """ Get user's tracks. """
    raw = self.client.get(
        self.client.USER_TRACKS % (self.name, offset, limit))
    return self._parse_response(raw, strack)
def get_playlists(self, offset=0, limit=50):
    """ Get user's playlists. """
    response = self.client.get(
        self.client.USER_PLAYLISTS % (self.name, offset, limit))
    # The original trailing `return playlists` was unreachable and referenced
    # an undefined name; it has been removed.
    return self._parse_response(response, splaylist)
def _parse_response(self, response, target_object=strack):
    """ Generic response parser: decode a JSON HTTP response into a list
    of target_object instances bound to this client. """
    payload = json.loads(response.read().decode("utf-8"))
    # Avoid shadowing the builtin `list` as the original did.
    return [target_object(item, client=self.client) for item in payload]
def calculate_category_probability(self):
    """
    Cache the individual probability of each category, i.e. the chance
    that an arbitrary token belongs to it ('prc') and that it does not
    ('prnc'), based on per-category token tallies.
    """
    tallies = {}
    for name, bayes_category in self.categories.get_categories().items():
        tallies[name] = bayes_category.get_tally()
    total = float(sum(tallies.values()))
    probs = {}
    for name, count in tallies.items():
        probs[name] = float(count) / total if total > 0 else 0.0
    for name, probability in probs.items():
        self.probabilities[name] = {
            # Probability that any given token is of this category
            'prc': probability,
            # Probability that any given token is not of this category
            'prnc': sum(probs.values()) - probability
        }
def train(self, category, text):
    """
    Train a category with a sample of text.
    :param category: the name of the category we want to train
    :type category: str
    :param text: the text we want to train the category with
    :type text: str
    """
    try:
        bayes_category = self.categories.get_category(category)
    except KeyError:
        # First sample for this category: create it on the fly.
        bayes_category = self.categories.add_category(category)
    counts = self.count_token_occurrences(self.tokenizer(str(text)))
    for token, count in counts.items():
        bayes_category.train_token(token, count)
    # Refresh the cached per-category probabilities.
    self.calculate_category_probability()
def untrain(self, category, text):
    """
    Untrain a category with a sample of text.
    :param category: the name of the category we want to train
    :type category: str
    :param text: the text we want to untrain the category with
    :type text: str
    """
    try:
        bayes_category = self.categories.get_category(category)
    except KeyError:
        # Unknown category: nothing to untrain.
        return
    counts = self.count_token_occurrences(self.tokenizer(str(text)))
    for token, count in counts.items():
        bayes_category.untrain_token(token, count)
    # Refresh the cached per-category probabilities.
    self.calculate_category_probability()
def classify(self, text):
    """
    Choose the highest scoring category for a sample of text.
    :param text: sample text to classify
    :type text: str
    :return: the "winning" category, or None when nothing scored
    :rtype: str
    """
    scores = self.score(text)
    if not scores:
        return None
    # Stable sort ascending by score; the last entry wins (same tie-break
    # behaviour as the original implementation).
    ranked = sorted(scores.items(), key=lambda kv: kv[1])
    return ranked[-1][0]
q42337 | SimpleBayes.score | train | def score(self, text):
    """
    Scores a sample of text
    :param text: sample text to score
    :type text: str
    :return: dict of scores per category
    :rtype: dict
    """
    # NOTE(review): relies on self.count_token_occurrences, self.tokenizer and
    # self.calculate_bayesian_probability defined elsewhere in this class.
    occurs = self.count_token_occurrences(self.tokenizer(text))
    scores = {}
    for category in self.categories.get_categories().keys():
        scores[category] = 0
    categories = self.categories.get_categories().items()
    for word, count in occurs.items():
        token_scores = {}
        # Adding up individual token scores
        for category, bayes_category in categories:
            token_scores[category] = \
                float(bayes_category.get_token_count(word))
        # We use this to get token-in-category probabilities
        token_tally = sum(token_scores.values())
        # If this token isn't found anywhere its probability is 0
        if token_tally == 0.0:
            continue
        # Calculating bayes probabiltity for this token
        # http://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering
        for category, token_score in token_scores.items():
            # Bayes probability * the number of occurances of this token
            scores[category] += count * \
                self.calculate_bayesian_probability(
                    category,
                    token_score,
                    token_tally
                )
    # Removing empty categories from the results
    final_scores = {}
    for category, score in scores.items():
        if score > 0:
            final_scores[category] = score
    return final_scores | python | {
"resource": ""
} |
def tally(self, category):
    """
    Get the tally for a requested category.
    :param category: The category we want a tally for
    :type category: str
    :return: tally for a given category (0 for an unknown category)
    :rtype: int
    """
    try:
        bayes_category = self.categories.get_category(category)
    except KeyError:
        return 0
    return bayes_category.get_tally()
def get_cache_location(self):
    """
    Get the location of the cache file (cache_path joined with cache_file).
    :return: the location of the cache file
    :rtype: string
    """
    base = self.cache_path
    if not base.endswith('/'):
        base += '/'
    return base + self.cache_file
def cache_persist(self):
    """
    Save the current trained data to the cache file.
    This is initiated by the program using this module.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open).
    with open(self.get_cache_location(), 'wb') as fh:
        pickle.dump(self.categories, fh)
def cache_train(self):
    """
    Load the data for this classifier from the cache file.
    :return: whether or not loading succeeded
    :rtype: bool
    """
    filename = self.get_cache_location()
    if not os.path.exists(filename):
        return False
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open).
    with open(filename, 'rb') as fh:
        categories = pickle.load(fh)
    assert isinstance(categories, BayesCategories), \
        "Cache data is either corrupt or invalid"
    self.categories = categories
    # Updating our per-category overall probabilities
    self.calculate_category_probability()
    return True
def create_feature_type_rates(self, created=False):
    """
    When this role has just been created, seed a zero-valued
    FeatureTypeRate for every existing FeatureType.
    """
    if not created:
        return
    for feature_type in FeatureType.objects.all():
        FeatureTypeRate.objects.create(role=self, feature_type=feature_type, rate=0)
def addToService(self, service, namespace=None, seperator='.'):
    """
    Add this Handler's exported methods (those carrying an `export_rpc`
    attribute) to an RPC Service instance, optionally under a namespace.
    """
    if namespace is None:
        namespace = []
    # `basestring` only exists on Python 2; `str` covers Python 3.
    if isinstance(namespace, str):
        namespace = [namespace]
    for n, m in inspect.getmembers(self, inspect.ismethod):
        if hasattr(m, 'export_rpc'):
            try:
                # export_rpc may be a list of name parts...
                name = seperator.join(namespace + m.export_rpc)
            except TypeError:
                # ...or a single string.
                name = seperator.join(namespace + [m.export_rpc])
            service.add(m, name)
def print_progress_bar(text, done, total, width):
    """
    Render an in-place progress bar on stdout; does nothing when total <= 0.
    """
    if total <= 0:
        return
    filled = int(float(width) * float(done) / float(total))
    bar = '#' * filled + ' ' * (width - filled)
    sys.stdout.write("\r{0} [{1}] ({2}/{3})".format(text, bar, done, total))
    sys.stdout.flush()
def fetch_modules(config, relative_path, download_directory):
    """
    Assemble the modules which will be included in CMakeLists.txt.

    Returns (modules, cleaned_config) where modules is a list of
    Module(path, name) tuples and cleaned_config aggregates configuration
    gathered from autocmake.yml and from the module documentation.

    NOTE: `relative_path` is currently unused but kept for interface
    compatibility.
    """
    # `Iterable` was previously imported from `collections` but never used;
    # on Python >= 3.10 that import fails (it moved to collections.abc).
    from collections import namedtuple, defaultdict
    from autocmake.extract import extract_list, to_d, to_l
    from autocmake.parse_rst import parse_cmake_module
    cleaned_config = defaultdict(lambda: [])
    modules = []
    Module = namedtuple('Module', 'path name')
    num_sources = len(extract_list(config, 'source'))
    print_progress_bar(text='- assembling modules:',
                       done=0,
                       total=num_sources,
                       width=30)
    if 'modules' in config:
        i = 0
        for t in config['modules']:
            for k, v in t.items():
                d = to_d(v)
                for _k, _v in to_d(v).items():
                    cleaned_config[_k] = flat_add(cleaned_config[_k], _v)
                # fetch sources and parse them
                if 'source' in d:
                    for src in to_l(d['source']):
                        i += 1
                        # we download the file
                        module_name = os.path.basename(src)
                        if 'http' in src:
                            path = download_directory
                            name = 'autocmake_{0}'.format(module_name)
                            dst = os.path.join(download_directory, 'autocmake_{0}'.format(module_name))
                            fetch_url(src, dst)
                            file_name = dst
                            fetch_dst_directory = download_directory
                        else:
                            if os.path.exists(src):
                                path = os.path.dirname(src)
                                name = module_name
                                file_name = src
                                fetch_dst_directory = path
                            else:
                                sys.stderr.write("ERROR: {0} does not exist\n".format(src))
                                sys.exit(-1)
                        # we infer config from the module documentation
                        # dictionary d overrides the configuration in the module documentation
                        # this allows to override interpolation inside the module
                        with open(file_name, 'r') as f:
                            parsed_config = parse_cmake_module(f.read(), d)
                        for _k2, _v2 in parsed_config.items():
                            if _k2 not in to_d(v):
                                # we add to clean_config only if the entry does not exist
                                # in parent autocmake.yml already
                                # this allows to override
                                cleaned_config[_k2] = flat_add(cleaned_config[_k2], _v2)
                        modules.append(Module(path=path, name=name))
                        print_progress_bar(text='- assembling modules:',
                                           done=i,
                                           total=num_sources,
                                           width=30)
    print('')
    return modules, cleaned_config
q42346 | fetch_url | train | def fetch_url(src, dst):
"""
Fetch file from URL src and save it to dst.
"""
# we do not use the nicer sys.version_info.major
# for compatibility with Python < 2.7
if sys.version_info[0] > 2:
import urllib.request
class URLopener(urllib.request.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
sys.stderr.write("ERROR: could not fetch {0}\n".format(url))
sys.exit(-1)
else:
import urllib
class URLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
sys.stderr.write("ERROR: could not fetch {0}\n".format(url))
sys.exit(-1)
dirname = os.path.dirname(dst)
if dirname != '':
if not os.path.isdir(dirname):
os.makedirs(dirname)
opener = URLopener()
opener.retrieve(src, dst) | python | {
"resource": ""
} |
q42347 | PollSubmissionsAPI.get_single_poll_submission | train | def get_single_poll_submission(self, id, poll_id, poll_session_id):
"""
Get a single poll submission.
Returns the poll submission with the given id
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - poll_id
"""ID"""
path["poll_id"] = poll_id
# REQUIRED - PATH - poll_session_id
"""ID"""
path["poll_session_id"] = poll_session_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("GET /api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id}".format(**path), data=data, params=params, no_data=True) | python | {
"resource": ""
} |
q42348 | PollSubmissionsAPI.create_single_poll_submission | train | def create_single_poll_submission(self, poll_id, poll_session_id, poll_submissions_poll_choice_id):
"""
Create a single poll submission.
Create a new poll submission for this poll session
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - poll_id
"""ID"""
path["poll_id"] = poll_id
# REQUIRED - PATH - poll_session_id
"""ID"""
path["poll_session_id"] = poll_session_id
# REQUIRED - poll_submissions[poll_choice_id]
"""The chosen poll choice for this submission."""
data["poll_submissions[poll_choice_id]"] = poll_submissions_poll_choice_id
self.logger.debug("POST /api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions".format(**path), data=data, params=params, no_data=True) | python | {
"resource": ""
} |
q42349 | HasPermissionOrIsAuthor.has_object_permission | train | def has_object_permission(self, request, view, obj):
"""determines if requesting user has permissions for the object
:param request: WSGI request object - where we get the user from
:param view: the view calling for permission
:param obj: the object in question
:return: `bool`
"""
# Give permission if we're not protecting this method
if self.protected_methods and request.method not in self.protected_methods:
return True
user = getattr(request, "user", None)
if not user or user.is_anonymous():
return False
if self.require_staff and not user.is_staff:
return False
# if they have higher-level privileges we can return true right now
if user.has_perms(self.permissions):
return True
# no? ok maybe they're the author and have appropriate author permissions.
authors_field = getattr(obj, self.authors_field, None)
if not authors_field:
return False
if self.author_permissions and not user.has_perms(self.author_permissions):
return False
return user in authors_field.all() | python | {
"resource": ""
} |
q42350 | CanEditCmsNotifications.has_permission | train | def has_permission(self, request, view):
"""If method is GET, user can access, if method is PUT or POST user must be a superuser.
"""
has_permission = False
if request.method == "GET" \
or request.method in ["PUT", "POST", "DELETE"] \
and request.user and request.user.is_superuser:
has_permission = True
return has_permission | python | {
"resource": ""
} |
q42351 | Api.bind | train | def bind(self, app):
"""Bind API to Muffin."""
self.parent = app
app.add_subapp(self.prefix, self.app) | python | {
"resource": ""
} |
q42352 | Api.register | train | def register(self, *paths, methods=None, name=None):
"""Register handler to the API."""
if isinstance(methods, str):
methods = [methods]
def wrapper(handler):
if isinstance(handler, (FunctionType, MethodType)):
handler = RESTHandler.from_view(handler, *(methods or ['GET']))
if handler.name in self.handlers:
raise muffin.MuffinException('Handler is already registered: %s' % handler.name)
self.handlers[tuple(paths or ["/{0}/{{{0}}}".format(handler.name)])] = handler
handler.bind(self.app, *paths, methods=methods, name=name or handler.name)
return handler
# Support for @app.register(func)
if len(paths) == 1 and callable(paths[0]):
view = paths[0]
paths = []
return wrapper(view)
return wrapper | python | {
"resource": ""
} |
q42353 | Api.swagger_schema | train | def swagger_schema(self, request):
"""Render API Schema."""
if self.parent is None:
return {}
spec = APISpec(
self.parent.name, self.parent.cfg.get('VERSION', ''),
plugins=['apispec.ext.marshmallow'], basePatch=self.prefix
)
for paths, handler in self.handlers.items():
spec.add_tag({
'name': handler.name,
'description': utils.dedent(handler.__doc__ or ''),
})
for path in paths:
operations = {}
for http_method in handler.methods:
method = getattr(handler, http_method.lower())
operation = OrderedDict({
'tags': [handler.name],
'summary': method.__doc__,
'produces': ['application/json'],
'responses': {200: {'schema': {'$ref': {'#/definitions/' + handler.name}}}}
})
operation.update(utils.load_yaml_from_docstring(method.__doc__) or {})
operations[http_method.lower()] = operation
spec.add_path(self.prefix + path, operations=operations)
if getattr(handler, 'Schema', None):
kwargs = {}
if getattr(handler.meta, 'model', None):
kwargs['description'] = utils.dedent(handler.meta.model.__doc__ or '')
spec.definition(handler.name, schema=handler.Schema, **kwargs)
return deepcopy(spec.to_dict()) | python | {
"resource": ""
} |
q42354 | update_pzone | train | def update_pzone(**kwargs):
"""Update pzone data in the DB"""
pzone = PZone.objects.get(**kwargs)
# get the data and loop through operate_on, applying them if necessary
when = timezone.now()
data = pzone.data
for operation in pzone.operations.filter(when__lte=when, applied=False):
data = operation.apply(data)
operation.applied = True
operation.save()
pzone.data = data
# create a history entry
pzone.history.create(data=pzone.data)
# save modified pzone, making transactions permanent
pzone.save() | python | {
"resource": ""
} |
q42355 | PZoneManager.operate_on | train | def operate_on(self, when=None, apply=False, **kwargs):
"""Do something with operate_on. If apply is True, all transactions will
be applied and saved via celery task."""
# get pzone based on id
pzone = self.get(**kwargs)
# cache the current time
now = timezone.now()
# ensure we have some value for when
if when is None:
when = now
if when < now:
histories = pzone.history.filter(date__lte=when)
if histories.exists():
# we have some history, use its data
pzone.data = histories[0].data
else:
# only apply operations if cache is expired or empty, or we're looking at the future
data = pzone.data
# Get the cached time of the next expiration
next_operation_time = cache.get('pzone-operation-expiry-' + pzone.name)
if next_operation_time is None or next_operation_time < when:
# start applying operations
pending_operations = pzone.operations.filter(when__lte=when, applied=False)
for operation in pending_operations:
data = operation.apply(data)
# reassign data
pzone.data = data
if apply and pending_operations.exists():
# there are operations to apply, do celery task
update_pzone.delay(**kwargs)
# return pzone, modified if apply was True
return pzone | python | {
"resource": ""
} |
q42356 | PZoneManager.preview | train | def preview(self, when=timezone.now(), **kwargs):
"""Preview transactions, but don't actually save changes to list."""
return self.operate_on(when=when, apply=False, **kwargs) | python | {
"resource": ""
} |
q42357 | strack.get_download_link | train | def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url | python | {
"resource": ""
} |
q42358 | strack.get_file_extension | train | def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext | python | {
"resource": ""
} |
q42359 | strack.gen_localdir | train | def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if not exists.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory | python | {
"resource": ""
} |
q42360 | strack.track_exists | train | def track_exists(self, localdir):
""" Check if track exists in local directory. """
path = glob.glob(self.gen_localdir(localdir) +
self.gen_filename() + "*")
if len(path) > 0 and os.path.getsize(path[0]) > 0:
return True
return False | python | {
"resource": ""
} |
q42361 | strack.get_ignored_tracks | train | def get_ignored_tracks(self, localdir):
""" Get ignored tracks list. """
ignore_file = "%s/.ignore" % localdir
list = []
if os.path.exists(ignore_file):
f = open(ignore_file)
ignored = f.readlines()
f.close()
for i in ignored:
list.append("%s/%s" % (localdir, i.rstrip()))
return list | python | {
"resource": ""
} |
q42362 | strack.download | train | def download(self, localdir, max_retry):
""" Download a track in local directory. """
local_file = self.gen_localdir(localdir) + self.gen_filename()
if self.track_exists(localdir):
print("Track {0} already downloaded, skipping!".format(
self.get("id")))
return False
if local_file in self.get_ignored_tracks(localdir):
print("\033[93mTrack {0} ignored, skipping!!\033[0m".format(
self.get("id")))
return False
dlurl = self.get_download_link()
if not dlurl:
raise serror("Can't download track_id:%d|%s" % (
self.get("id"),
self.get("title")))
retry = max_retry
print("\nDownloading %s (%d).." % (self.get("title"), self.get("id")))
while True:
try:
urllib.request.urlretrieve(dlurl, local_file,
self._progress_hook)
break
except Exception as e:
if os.path.isfile(local_file):
os.unlink(local_file)
retry -= 1
if retry < 0:
raise serror("Can't download track-id %s, max retry "
"reached (%d). Error occured: %s" % (
self.get("id"), max_retry, type(e)))
else:
print("\033[93mError occured for track-id %s (%s). "
"Retrying.. (%d/%d) \033[0m" % (
self.get("id"),
type(e),
max_retry - retry,
max_retry))
except KeyboardInterrupt:
if os.path.isfile(local_file):
os.unlink(local_file)
raise serror("KeyBoard Interrupt: Incomplete file removed.")
self.filepath = local_file + self.get_file_extension(local_file)
os.rename(local_file, self.filepath)
print("Downloaded => %s" % self.filepath)
self.downloaded = True
return True | python | {
"resource": ""
} |
q42363 | strack.process_tags | train | def process_tags(self, tag=None):
"""Process ID3 Tags for mp3 files."""
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath) | python | {
"resource": ""
} |
q42364 | strack.convert | train | def convert(self):
"""Convert file in mp3 format."""
if self.downloaded is False:
raise serror("Track not downloaded, can't convert file..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype == "audio/mpeg":
print("File is already in mp3 format. Skipping convert.")
return False
rootpath = os.path.dirname(os.path.dirname(self.filepath))
backupdir = rootpath + "/backups/" + self.get("username")
if not os.path.exists(backupdir):
os.makedirs(backupdir)
backupfile = "%s/%s%s" % (
backupdir,
self.gen_filename(),
self.get_file_extension(self.filepath))
newfile = "%s.mp3" % self.filename_without_extension()
os.rename(self.filepath, backupfile)
self.filepath = newfile
print("Converting to %s.." % newfile)
song = AudioSegment.from_file(backupfile)
return song.export(newfile, format="mp3") | python | {
"resource": ""
} |
q42365 | strack.download_artwork | train | def download_artwork(self, localdir, max_retry):
"""
Download track's artwork and return file path.
Artwork's path is saved in track's metadata as 'artwork-path' key.
"""
if self.get("artwork-url") == "None":
self.metadata["artwork-path"] = None
return None
artwork_dir = localdir + "/artworks"
if not os.path.isdir(artwork_dir):
if os.path.isfile(artwork_dir):
os.unlink(artwork_dir)
os.mkdir(artwork_dir)
artwork_filepath = artwork_dir + "/" + self.gen_artwork_filename()
retry = max_retry
while True:
try:
res = urllib.request.urlopen(self.get("artwork-url"))
with open(artwork_filepath, "wb") as file:
file.write(res.read())
break
except Exception as e:
retry -= 1
if retry < 0:
print(serror("Can't download track's artwork, max retry "
"reached (%d). Error occured: %s" % (
max_retry, type(e))))
return False
else:
print("\033[93mTrack's artwork download failed (%s). "
"Retrying.. (%d/%d) \033[0m" % (
type(e),
max_retry - retry,
max_retry))
self.metadata["artwork-path"] = artwork_filepath | python | {
"resource": ""
} |
q42366 | strack._progress_hook | train | def _progress_hook(self, blocknum, blocksize, totalsize):
""" Progress hook for urlretrieve. """
read = blocknum * blocksize
if totalsize > 0:
percent = read * 1e2 / totalsize
s = "\r%d%% %*d / %d" % (
percent, len(str(totalsize)), read, totalsize)
sys.stdout.write(s)
if read >= totalsize:
sys.stdout.write("\n")
else:
sys.stdout.write("read %d\n" % read) | python | {
"resource": ""
} |
q42367 | stag.load_id3 | train | def load_id3(self, track):
""" Load id3 tags from strack metadata """
if not isinstance(track, strack):
raise TypeError('strack object required')
timestamp = calendar.timegm(parse(track.get("created-at")).timetuple())
self.mapper[TIT1] = TIT1(text=track.get("description"))
self.mapper[TIT2] = TIT2(text=track.get("title"))
self.mapper[TIT3] = TIT3(text=track.get("tags-list"))
self.mapper[TDOR] = TDOR(text=str(timestamp))
self.mapper[TLEN] = TLEN(text=track.get("duration"))
self.mapper[TOFN] = TOFN(text=track.get("permalink"))
self.mapper[TCON] = TCON(text=track.get("genre"))
self.mapper[TCOP] = TCOP(text=track.get("license"))
self.mapper[WOAS] = WOAS(url=track.get("permalink-url"))
self.mapper[WOAF] = WOAF(url=track.get("uri"))
self.mapper[TPUB] = TPUB(text=track.get("username"))
self.mapper[WOAR] = WOAR(url=track.get("user-url"))
self.mapper[TPE1] = TPE1(text=track.get("artist"))
self.mapper[TALB] = TALB(text="%s Soundcloud tracks"
% track.get("artist"))
if track.get("artwork-path") is not None:
self.mapper[APIC] = APIC(value=track.get("artwork-path")) | python | {
"resource": ""
} |
q42368 | stag.write_id3 | train | def write_id3(self, filename):
""" Write id3 tags """
if not os.path.exists(filename):
raise ValueError("File doesn't exists.")
self.mapper.write(filename) | python | {
"resource": ""
} |
q42369 | SpriteTexturizer.from_images | train | def from_images(cls, images, weights=None, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
"""Create a SpriteTexturizer from a sequence of Pyglet images.
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024
"""
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
weights, filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer | python | {
"resource": ""
} |
q42370 | parse_querystring | train | def parse_querystring(querystring):
"""
Return parsed querystring in dict
"""
if querystring is None or len(querystring) == 0:
return {}
qs_dict = parse.parse_qs(querystring, keep_blank_values=True)
for key in qs_dict:
if len(qs_dict[key]) != 1:
continue
qs_dict[key] = qs_dict[key][0]
if qs_dict[key] == '':
qs_dict[key] = True
return dict((key, qs_dict[key]) for key in qs_dict if len(key) != 0) | python | {
"resource": ""
} |
q42371 | Message.to_json | train | def to_json(self, pretty=True):
"""
to_json will call to_dict then dumps into json format
"""
data_dict = self.to_dict()
if pretty:
return json.dumps(
data_dict, sort_keys=True, indent=2)
return json.dumps(data_dict, sort_keys=True) | python | {
"resource": ""
} |
q42372 | Message.to_dict | train | def to_dict(self):
"""
to_dict will clean all protected and private properties
"""
return dict(
(k, self.__dict__[k]) for k in self.__dict__ if k.find("_") != 0) | python | {
"resource": ""
} |
q42373 | Message.match | train | def match(self, route):
"""
Match input route and return new Message instance
with parsed content
"""
_resource = trim_resource(self.resource)
self.method = self.method.lower()
resource_match = route.resource_regex.search(_resource)
if resource_match is None:
return None
# build params and querystring
params = resource_match.groupdict()
querystring = params.pop("querystring", "")
setattr(self, "param", params)
setattr(self, "query", parse_querystring(querystring))
return copy.deepcopy(self) | python | {
"resource": ""
} |
q42374 | Message.get_message_type | train | def get_message_type(message):
"""
Return message's type
"""
for msg_type in MessageType.FIELDS:
if Message.is_type(msg_type, message):
return msg_type
return MessageType.UNKNOWN | python | {
"resource": ""
} |
q42375 | Message.is_type | train | def is_type(msg_type, msg):
"""
Return message's type is or not
"""
for prop in MessageType.FIELDS[msg_type]["must"]:
if msg.get(prop, False) is False:
return False
for prop in MessageType.FIELDS[msg_type]["prohibit"]:
if msg.get(prop, False) is not False:
return False
return True | python | {
"resource": ""
} |
q42376 | JSONRPCService.add | train | def add(self, f, name=None, types=None, required=None):
"""
Adds a new method to the jsonrpc service.
Arguments:
f -- the remote function
name -- name of the method in the jsonrpc service
types -- list or dictionary of the types of accepted arguments
required -- list of required keyword arguments
If name argument is not given, function's own name will be used.
Argument types must be a list if positional arguments are used or a
dictionary if keyword arguments are used in the method in question.
Argument required MUST be used only for methods requiring keyword
arguments, not for methods accepting positional arguments.
"""
if name is None:
fname = f.__name__ # Register the function using its own name.
else:
fname = name
self.method_data[fname] = {'method': f}
if types is not None:
self.method_data[fname]['types'] = types
if required is not None:
self.method_data[fname]['required'] = required | python | {
"resource": ""
} |
q42377 | JSONRPCService.stopServing | train | def stopServing(self, exception=None):
"""
Returns a deferred that will fire immediately if there are
no pending requests, otherwise when the last request is removed
from self.pending.
"""
if exception is None:
exception = ServiceUnavailableError
self.serve_exception = exception
if self.pending:
d = self.out_of_service_deferred = defer.Deferred()
return d
return defer.succeed(None) | python | {
"resource": ""
} |
q42378 | JSONRPCService.call | train | def call(self, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = yield self.call_py(jsondata)
if result is None:
defer.returnValue(None)
else:
defer.returnValue(json.dumps(result)) | python | {
"resource": ""
} |
q42379 | JSONRPCService.call_py | train | def call_py(self, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
try:
try:
rdata = json.loads(jsondata)
except ValueError:
raise ParseError
except ParseError, e:
defer.returnValue(self._get_err(e))
return
# set some default values for error handling
request = self._get_default_vals()
try:
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = yield self._handle_request(request)
# Don't respond to notifications
if respond is None:
defer.returnValue(None)
else:
defer.returnValue(respond)
return
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
try:
self._fill_request(request_, rdata_)
except InvalidRequestError, e:
err = self._get_err(e, request_['id'])
if err:
responds.append(err)
continue
except JSONRPCError, e:
err = self._get_err(e, request_['id'])
if err:
responds.append(err)
continue
requests.append(request_)
for request_ in requests:
try:
# TODO: We should use a deferred list so requests
# are processed in parallel
respond = yield self._handle_request(request_)
except JSONRPCError, e:
respond = self._get_err(e,
request_['id'],
request_['jsonrpc'])
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
defer.returnValue(responds)
return
# Nothing to respond.
defer.returnValue(None)
return
else:
# empty dict, list or wrong type
raise InvalidRequestError
except InvalidRequestError, e:
defer.returnValue(self._get_err(e, request['id']))
except JSONRPCError, e:
defer.returnValue(self._get_err(e,
request['id'],
request['jsonrpc'])) | python | {
"resource": ""
} |
q42380 | JSONRPCService._get_err | train | def _get_err(self, e, id=None, jsonrpc=DEFAULT_JSONRPC):
"""
Returns jsonrpc error message.
"""
# Do not respond to notifications when the request is valid.
if not id \
and not isinstance(e, ParseError) \
and not isinstance(e, InvalidRequestError):
return None
respond = {'id': id}
if isinstance(jsonrpc, int):
# v1.0 requires result to exist always.
# No error codes are defined in v1.0 so only use the message.
if jsonrpc == 10:
respond['result'] = None
respond['error'] = e.dumps()['message']
else:
self._fill_ver(jsonrpc, respond)
respond['error'] = e.dumps()
else:
respond['jsonrpc'] = jsonrpc
respond['error'] = e.dumps()
return respond | python | {
"resource": ""
} |
q42381 | JSONRPCService._man_args | train | def _man_args(self, f):
"""
Returns number of mandatory arguments required by given function.
"""
argcount = f.func_code.co_argcount
# account for "self" getting passed to class instance methods
if isinstance(f, types.MethodType):
argcount -= 1
if f.func_defaults is None:
return argcount
return argcount - len(f.func_defaults) | python | {
"resource": ""
} |
q42382 | JSONRPCService._max_args | train | def _max_args(self, f):
"""
Returns maximum number of arguments accepted by given function.
"""
if f.func_defaults is None:
return f.func_code.co_argcount
return f.func_code.co_argcount + len(f.func_defaults) | python | {
"resource": ""
} |
q42383 | JSONRPCService._get_id | train | def _get_id(self, rdata):
"""
Returns jsonrpc request's id value or None if there is none.
InvalidRequestError will be raised if the id value has invalid type.
"""
if 'id' in rdata:
if isinstance(rdata['id'], basestring) or \
isinstance(rdata['id'], int) or \
isinstance(rdata['id'], long) or \
isinstance(rdata['id'], float) or \
rdata['id'] is None:
return rdata['id']
else:
# invalid type
raise InvalidRequestError
else:
# It's a notification.
return None | python | {
"resource": ""
} |
q42384 | JSONRPCService._get_method | train | def _get_method(self, rdata):
"""
Returns jsonrpc request's method value.
InvalidRequestError will be raised if it's missing or is wrong type.
MethodNotFoundError will be raised if a method with given method name
does not exist.
"""
if 'method' in rdata:
if not isinstance(rdata['method'], basestring):
raise InvalidRequestError
else:
raise InvalidRequestError
if rdata['method'] not in self.method_data.keys():
raise MethodNotFoundError
return rdata['method'] | python | {
"resource": ""
} |
q42385 | JSONRPCService._get_params | train | def _get_params(self, rdata):
"""
Returns a list of jsonrpc request's method parameters.
"""
if 'params' in rdata:
if isinstance(rdata['params'], dict) \
or isinstance(rdata['params'], list) \
or rdata['params'] is None:
return rdata['params']
else:
# wrong type
raise InvalidRequestError
else:
return None | python | {
"resource": ""
} |
q42386 | JSONRPCService._fill_request | train | def _fill_request(self, request, rdata):
"""Fills request with data from the jsonrpc call."""
if not isinstance(rdata, dict):
raise InvalidRequestError
request['jsonrpc'] = self._get_jsonrpc(rdata)
request['id'] = self._get_id(rdata)
request['method'] = self._get_method(rdata)
request['params'] = self._get_params(rdata) | python | {
"resource": ""
} |
q42387 | JSONRPCService._call_method | train | def _call_method(self, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method):
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if not self._vargs(method) \
and len(params) > self._max_args(method):
raise InvalidParamsError('too many arguments')
result = yield defer.maybeDeferred(method, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = yield defer.maybeDeferred(method, **params)
else: # No params
result = yield defer.maybeDeferred(method)
except JSONRPCError:
raise
except Exception:
# Exception was raised inside the method.
log.msg('Exception raised while invoking RPC method "{}".'.format(
request['method']))
log.err()
raise ServerError
defer.returnValue(result) | python | {
"resource": ""
} |
q42388 | JSONRPCService._handle_request | train | def _handle_request(self, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
if self.serve_exception:
raise self.serve_exception()
d = self._call_method(request)
self.pending.add(d)
if self.timeout:
timeout_deferred = self.reactor.callLater(self.timeout, d.cancel)
def completed(result):
if timeout_deferred.active():
# cancel the timeout_deferred if it has not been fired yet
# this is to prevent d's deferred chain from firing twice
# (and raising an exception).
timeout_deferred.cancel()
return result
d.addBoth(completed)
try:
result = yield d
except defer.CancelledError:
# The request was cancelled due to a timeout or by cancelPending
# having been called. We return a TimeoutError to the client.
self._remove_pending(d)
raise TimeoutError()
except Exception as e:
self._remove_pending(d)
raise e
self._remove_pending(d)
# Do not respond to notifications.
if request['id'] is None:
defer.returnValue(None)
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
defer.returnValue(respond) | python | {
"resource": ""
} |
q42389 | JSONRPCService._validate_params_types | train | def _validate_params_types(self, method, params):
"""
Validates request's parameter types.
"""
if isinstance(params, list):
if not isinstance(self.method_data[method]['types'], list):
raise InvalidParamsError(
'expected keyword params, not positional')
for param, type, posnum in zip(params,
self.method_data[method]['types'],
range(1, len(params)+1)):
if not (isinstance(param, type) or param is None):
raise InvalidParamsError(
'positional arg #{} is the wrong type'.format(posnum))
elif isinstance(params, dict):
if not isinstance(self.method_data[method]['types'], dict):
raise InvalidParamsError(
'expected positional params, not keyword')
if 'required' in self.method_data[method]:
for key in self.method_data[method]['required']:
if key not in params:
raise InvalidParamsError('missing key: %s' % key)
for key in params.keys():
if key not in self.method_data[method]['types'] or \
not (isinstance(params[key],
self.method_data[method]['types'][key])
or params[key] is None):
raise InvalidParamsError(
'arg "{}" is the wrong type'.format(key)) | python | {
"resource": ""
} |
q42390 | JSONRPCClientService.startService | train | def startService(self):
"""
Start the service and connect the JSONRPCClientFactory.
"""
self.clientFactory.connect().addErrback(
log.err, 'error starting the JSON-RPC client service %r' % (self,))
service.Service.startService(self) | python | {
"resource": ""
} |
q42391 | JSONRPCClientService.callRemote | train | def callRemote(self, *a, **kw):
"""
Make a callRemote request of the JSONRPCClientFactory.
"""
if not self.running:
return defer.fail(ServiceStopped())
return self.clientFactory.callRemote(*a, **kw) | python | {
"resource": ""
} |
q42392 | JSONRPCError.dumps | train | def dumps(self):
"""Return the Exception data in a format for JSON-RPC."""
error = {'code': self.code,
'message': str(self.message)}
if self.data is not None:
error['data'] = self.data
return error | python | {
"resource": ""
} |
q42393 | ReadTuple.stringize | train | def stringize(
self,
rnf_profile=RnfProfile(),
):
"""Create RNF representation of this read.
Args:
read_tuple_id_width (int): Maximal expected string length of read tuple ID.
genome_id_width (int): Maximal expected string length of genome ID.
chr_id_width (int): Maximal expected string length of chromosome ID.
coor_width (int): Maximal expected string length of a coordinate.
"""
sorted_segments = sorted(self.segments,
key=lambda x: (
x.genome_id * (10 ** 23) +
x.chr_id * (10 ** 21) +
(x.left + (int(x.left == 0) * x.right - 1)) * (10 ** 11) +
x.right * (10 ** 1) +
int(x.direction == "F")
)
)
segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]
read_tuple_name = "__".join(
[
self.prefix,
format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width),
",".join(segments_strings),
self.suffix,
]
)
return read_tuple_name | python | {
"resource": ""
} |
q42394 | ReadTuple.destringize | train | def destringize(self, string):
"""Get RNF values for this read from its textual representation and save them
into this object.
Args:
string(str): Textual representation of a read.
Raises:
ValueError
"""
# todo: assert -- starting with (, ending with )
# (prefix,read_tuple_id,segments_t,suffix)=(text).split("__")
# segments=segments_t.split("),(")
m = read_tuple_destr_pattern.match(string)
if not m:
smbl.messages.error(
"'{}' is not a valid read name with respect to the RNF specification".format(string),
program="RNFtools", subprogram="RNF format", exception=ValueError
)
groups = m.groups()
# todo: check number of groups
self.prefix = groups[0]
read_tuple_id = groups[1]
self.read_tuple_id = int(read_tuple_id, 16)
self.segments = []
segments_str = groups[2:-1]
for b_str in segments_str:
if b_str is not None:
if b_str[0] == ",":
b_str = b_str[1:]
b = rnftools.rnfformat.Segment()
b.destringize(b_str)
self.segments.append(b)
self.suffix = groups[-1] | python | {
"resource": ""
} |
def set_server(self, wsgi_app, fnc_serve=None):
    """
    Figure out how the WSGI application is to be served according
    to config, and store the resulting worker.

    Args:
        wsgi_app: the WSGI application callable.
        fnc_serve: optional callable that runs the server loop; it may
            be replaced depending on the configured server backend.

    Side effects:
        - stores the app via ``self.set_wsgi_app()``
        - sets ``self.worker`` to either a gevent greenlet or a callable
        - sets ``self.pluginmgr_config["start_manual"]`` for externally
          managed servers (uwsgi / gunicorn)
    """
    self.set_wsgi_app(wsgi_app)

    server = self.get_config("server")
    if server == "gevent":
        # SSL is only honored by the gevent backend; build the kwargs
        # only when enabled so plain HTTP keeps working.
        ssl_config = self.get_config("ssl")
        ssl_context = {}
        if ssl_config.get("enabled"):
            ssl_context["certfile"] = ssl_config.get("cert")
            ssl_context["keyfile"] = ssl_config.get("key")
        from gevent.pywsgi import WSGIServer
        http_server = WSGIServer(
            (self.host, self.port),
            wsgi_app,
            **ssl_context
        )
        self.log.debug("Serving WSGI via gevent.pywsgi.WSGIServer")
        fnc_serve = http_server.serve_forever
    elif server in ("uwsgi", "gunicorn"):
        # These servers are started externally; nothing to run here.
        self.pluginmgr_config["start_manual"] = True
    elif server == "self":
        fnc_serve = self.run

    # figure out async handler
    if self.get_config("async") == "gevent":
        # handle async via gevent
        import gevent
        self.log.debug("Handling wsgi on gevent")
        self.worker = gevent.spawn(fnc_serve)
    else:
        # NOTE(review): the original code had identical branches for
        # "thread" and the fallback; collapsed into one. In both cases
        # the plugin manager is expected to drive the callable.
        self.worker = fnc_serve
"resource": ""
} |
def pre_save(self, instance, add):
    """
    Auto-generate the slug if needed.
    """
    # Slug currently entered on the instance (may be empty).
    slug = None
    value = self.value_from_object(instance)

    # Auto-populate (if the form didn't do that already). If you want
    # unique_with logic, use django-autoslug instead. This model field
    # only allows parameters which can be passed to the form widget too.
    if self.populate_from and (self.always_update or not value):
        value = getattr(instance, self.populate_from)

    if value:
        # Make sure the slugify logic is applied, even on manually
        # entered input, and enforce the field's max_length.
        slug = self.slugify(force_text(value))
        if len(slug) > self.max_length:
            slug = slug[:self.max_length]

    # Make the updated slug available as an instance attribute.
    setattr(instance, self.name, slug)
    return slug
"resource": ""
} |
def FirstSlotSlicer(primary_query, secondary_query, limit=30):  # noqa
    """
    Inject the first object from a queryset into the first position of a reading list.

    :param primary_query: djes.LazySearch object. Default queryset for the reading list.
    :param secondary_query: djes.LazySearch object. Its first result leads the reading list.
    :param limit: Maximum number of items in the reading list (default 30).
    :return: SearchSlicer mixing both querysets, with the secondary
        queryset pinned to index 0.
    """
    reading_list = SearchSlicer(limit=limit)
    reading_list.register_queryset(primary_query)
    # The validator restricts the secondary queryset to the first slot.
    reading_list.register_queryset(secondary_query, validator=lambda x: bool(x == 0))
    return reading_list
"resource": ""
} |
def register_queryset(self, queryset, validator=None, default=False):
    """
    Add a queryset to the iterator, with optional placement logic.

    :param queryset: List of objects included in the reading list.
    :param validator: Callable deciding the queryset's position in a
        reading_list; it accepts an index and returns a truthy value.
    :param default: Force this queryset to become the primary queryset
        used when no validator applies.
    :raises ValueError: If a non-default queryset is registered without
        a validator.
    """
    # The first registered queryset (or an explicit default) becomes
    # the primary queryset.
    if default or self.default_queryset is None:
        self.default_queryset = queryset
    elif validator:
        self.querysets[validator] = queryset
    else:
        raise ValueError(
            """Querysets require validation logic to integrate with reading lists."""
        )
"resource": ""
} |
def get_month_start_date(self):
    """Return a timezone-aware datetime for the first day of the current month."""
    current = timezone.now()
    return timezone.datetime(
        day=1,
        month=current.month,
        year=current.year,
        tzinfo=current.tzinfo,
    )
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.