Search is not available for this dataset
text stringlengths 75 104k |
|---|
def parse_table(table, flatten=True, footer=False):
    """Parses a table from sports-reference sites into a pandas dataframe.

    :param table: the PyQuery object representing the HTML table
    :param flatten: if True, flattens relative URLs to IDs. otherwise, leaves
        all fields as text without cleaning.
    :param footer: If True, returns the summary/footer of the page. Recommended
        to use this with flatten=False. Defaults to False.
    :returns: pd.DataFrame
    """
    if not len(table):
        return pd.DataFrame()
    # get columns
    columns = [c.attrib['data-stat']
               for c in table('thead tr:not([class]) th[data-stat]')]
    # get data
    rows = list(table('tbody tr' if not footer else 'tfoot tr')
                .not_('.thead, .stat_total, .stat_average').items())
    data = [
        [flatten_links(td) if flatten else td.text()
         for td in row.items('th,td')]
        for row in rows
    ]
    # make DataFrame
    df = pd.DataFrame(data, columns=columns, dtype='float')
    # add has_class columns
    allClasses = set(
        cls
        for row in rows
        if row.attr['class']
        for cls in row.attr['class'].split()
    )
    for cls in allClasses:
        df['has_class_' + cls] = [
            bool(row.attr['class'] and
                 cls in row.attr['class'].split())
            for row in rows
        ]
    # cleaning the DataFrame
    df.drop(['ranker', 'Xxx', 'Yyy', 'Zzz'],
            axis=1, inplace=True, errors='ignore')
    # year_id -> year (as int)
    if 'year_id' in df.columns:
        df.rename(columns={'year_id': 'year'}, inplace=True)
        if flatten:
            df.year = df.year.fillna(method='ffill')
            df['year'] = df.year.map(lambda s: str(s)[:4]).astype(int)
    # pos -> position
    if 'pos' in df.columns:
        df.rename(columns={'pos': 'position'}, inplace=True)
    # boxscore_word, game_date -> boxscore_id and separate into Y, M, D columns
    for bs_id_col in ('boxscore_word', 'game_date', 'box_score_text'):
        if bs_id_col in df.columns:
            df.rename(columns={bs_id_col: 'boxscore_id'}, inplace=True)
            break
    # ignore *, +, and other characters used to note things
    df.replace(re.compile(r'[\*\+\u2605]', re.U), '', inplace=True)
    for col in df.columns:
        if hasattr(df[col], 'str'):
            df[col] = df[col].str.strip()
    # player -> player_id and/or player_name
    if 'player' in df.columns:
        if flatten:
            df.rename(columns={'player': 'player_id'}, inplace=True)
            # when flattening, keep a column for names
            player_names = parse_table(table, flatten=False)['player_name']
            df['player_name'] = player_names
        else:
            df.rename(columns={'player': 'player_name'}, inplace=True)
    # team, team_name -> team_id
    for team_col in ('team', 'team_name'):
        if team_col in df.columns:
            # first, get rid of faulty rows
            df = df.loc[~df[team_col].isin(['XXX'])]
            if flatten:
                df.rename(columns={team_col: 'team_id'}, inplace=True)
    # season -> int
    if 'season' in df.columns and flatten:
        df['season'] = df['season'].astype(int)
    # handle date_game columns (different types)
    if 'date_game' in df.columns and flatten:
        date_re = r'month=(?P<month>\d+)&day=(?P<day>\d+)&year=(?P<year>\d+)'
        date_df = df['date_game'].str.extract(date_re, expand=True)
        if date_df.notnull().all(axis=1).any():
            df = pd.concat((df, date_df), axis=1)
        else:
            df.rename(columns={'date_game': 'boxscore_id'}, inplace=True)
    # game_location -> is_home
    if 'game_location' in df.columns and flatten:
        df['game_location'] = df['game_location'].isnull()
        df.rename(columns={'game_location': 'is_home'}, inplace=True)
    # mp: (min:sec) -> float(min + sec / 60), notes -> NaN, new column
    if 'mp' in df.columns and df.dtypes['mp'] == object and flatten:
        mp_df = df['mp'].str.extract(
            r'(?P<m>\d+):(?P<s>\d+)', expand=True).astype(float)
        no_match = mp_df.isnull().all(axis=1)
        if no_match.any():
            df.loc[no_match, 'note'] = df.loc[no_match, 'mp']
        df['mp'] = mp_df['m'] + mp_df['s'] / 60

    # converts number-y things to floats
    def convert_to_float(val):
        """Coerce percentages, salaries, and plain numbers to float;
        return the value unchanged when it cannot be parsed."""
        # percentages: (number%) -> float(number * 0.01)
        # (an unreachable duplicate branch that referenced a nonexistent
        # second regex group was removed here)
        m = re.search(r'([-\.\d]+)\%',
                      val if isinstance(val, basestring) else str(val), re.U)
        if m:
            try:
                return float(m.group(1)) / 100
            except ValueError:
                return val
        # salaries: $ABC,DEF,GHI -> float(ABCDEFGHI)
        m = re.search(r'\$[\d,]+',
                      val if isinstance(val, basestring) else str(val), re.U)
        if m:
            try:
                return float(re.sub(r'\$|,', '', val))
            except Exception:
                return val
        # generally try to coerce to float, unless it's an int or bool
        try:
            if isinstance(val, (int, bool)):
                return val
            else:
                return float(val)
        except Exception:
            return val

    if flatten:
        df = df.applymap(convert_to_float)
        df = df.loc[df.astype(bool).any(axis=1)]
    return df
def parse_info_table(table):
    """Parse an info table (e.g. the "Game Info" or "Officials" tables on a
    PFR boxscore page) into a dictionary.

    Keys are lowercased and have spaces/special characters replaced with
    underscores; values are link-flattened cell contents.

    :table: PyQuery object representing the HTML table.
    :returns: A dictionary representing the information.
    """
    info = {}
    for row in table('tr').not_('.thead').items():
        header_cell, data_cell = list(row('th, td').items())
        label = re.sub(r'\W', '_', header_cell.text().lower())
        info[label] = sportsref.utils.flatten_links(data_cell)
    return info
def flatten_links(td, _recurse=False):
    """Flattens relative URLs within text of a table cell to IDs and returns
    the result.

    :td: the PyQuery object for the HTML to convert
    :returns: the string with the links flattened to IDs
    """
    # helper that converts a single child node (text or element) to a string
    def _flatten_node(node):
        if isinstance(node, basestring):
            return node.strip()
        if 'href' in node.attrib:
            node_id = rel_url_to_id(node.attrib['href'])
            return node_id if node_id else node.text_content().strip()
        return flatten_links(pq(node), _recurse=True)

    # empty cells flatten to None at the top level, '' when recursing
    if td is None or not td.text():
        return '' if _recurse else None
    td.remove('span.note')
    return ''.join(map(_flatten_node, td.contents()))
def rel_url_to_id(url):
    """Converts a relative URL to a unique ID.

    Here, 'ID' refers generally to the unique ID for a given 'type' that a
    given datum has. For example, 'BradTo00' is Tom Brady's player ID - this
    corresponds to his relative URL, '/players/B/BradTo00.htm'. Similarly,
    '201409070dal' refers to the boxscore of the SF @ DAL game on 09/07/14.

    Supported types: players, boxscores, teams, years, leagues, awards,
    coaches, officials, schools (including high_schools.cgi?id=...).

    :returns: ID associated with the given relative URL.
    """
    # ordered list of recognized URL shapes; first match wins
    patterns = (
        # years / gamelogs
        r'.*/years/(\d{4}).*|.*/gamelog/(\d{4}).*',
        # players
        r'.*/players/(?:\w/)?(.+?)(?:/|\.html?)',
        # boxscores
        r'.*/boxscores/(.+?)\.html?',
        # teams
        r'.*/teams/(\w{3})/.*',
        # coaches
        r'.*/coaches/(.+?)\.html?',
        # stadiums
        r'.*/stadiums/(.+?)\.html?',
        # officials
        r'.*/officials/(.+?r)\.html?',
        # colleges
        r'.*/schools/(\S+?)/.*|.*college=([^&]+)',
        # high schools
        r'.*/schools/high_schools\.cgi\?id=([^\&]{8})',
        # boxscore date indexes
        r'.*/boxscores/index\.f?cgi\?(month=\d+&day=\d+&year=\d+)',
        # leagues
        r'.*/leagues/(.*_\d{4}).*',
        # awards
        r'.*/awards/(.+)\.htm',
    )
    for pattern in patterns:
        match = re.match(pattern, url, re.I)
        if match:
            # alternation patterns leave unused groups as None/'' - take
            # the first group that actually captured something
            return [group for group in match.groups() if group][0]
    # things we don't want to match but don't want to print a WARNING
    if url.startswith('/play-index/'):
        return url
    print('WARNING. NO MATCH WAS FOUND FOR "{}"'.format(url))
    return url
def PlayerSeasonFinder(**kwargs):
    """ Docstring will be filled in by __init__.py """
    kwargs.setdefault('offset', 0)
    player_seasons = []
    while True:
        # build the query URL for the current page of results
        querystring = _kwargs_to_qs(**kwargs)
        url = '{}?{}'.format(PSF_URL, querystring)
        if kwargs.get('verbose', False):
            print(url)
        doc = pq(utils.get_html(url))
        df = utils.parse_table(doc('table#results'))
        if df.empty:
            break
        player_seasons.extend(zip(df.player_id, df.year))
        # keep paging while the site advertises another page of results
        if not doc('*:contains("Next Page")'):
            break
        kwargs['offset'] += 100
    return player_seasons
def _kwargs_to_qs(**kwargs):
    """Converts kwargs given to PSF to a querystring.

    :returns: the querystring.
    """
    # start with defaults
    inpOptDef = inputs_options_defaults()
    opts = {
        name: dct['value']
        for name, dct in inpOptDef.items()
    }
    # clean up keys and values; iterate over a snapshot because the dict is
    # mutated inside the loop (mutating a dict while iterating it raises
    # RuntimeError on Python 3)
    for k, v in list(kwargs.items()):
        del kwargs[k]
        # bool => 'Y'|'N'
        if isinstance(v, bool):
            kwargs[k] = 'Y' if v else 'N'
        # tm, team => team_id
        elif k.lower() in ('tm', 'team'):
            kwargs['team_id'] = v
        # yr, year, yrs, years => year_min, year_max
        elif k.lower() in ('yr', 'year', 'yrs', 'years'):
            # strings must be handled before the generic Iterable case:
            # strings are iterable, so testing Iterable first would take
            # min/max over individual characters for inputs like '2010,2012'
            if isinstance(v, basestring):
                v = list(map(int, v.split(',')))
                kwargs['year_min'] = min(v)
                kwargs['year_max'] = max(v)
            elif isinstance(v, collections.Iterable):
                lst = list(v)
                kwargs['year_min'] = min(lst)
                kwargs['year_max'] = max(lst)
            else:
                kwargs['year_min'] = v
                kwargs['year_max'] = v
        # pos, position, positions => pos[]
        elif k.lower() in ('pos', 'position', 'positions'):
            if isinstance(v, basestring):
                v = v.split(',')
            elif not isinstance(v, collections.Iterable):
                v = [v]
            kwargs['pos[]'] = v
        # draft_pos, ... => draft_pos[]
        elif k.lower() in (
            'draft_pos', 'draftpos', 'draftposition', 'draftpositions',
            'draft_position', 'draft_positions'
        ):
            if isinstance(v, basestring):
                v = v.split(',')
            elif not isinstance(v, collections.Iterable):
                v = [v]
            kwargs['draft_pos[]'] = v
        # if not one of these cases, put it back in kwargs
        else:
            kwargs[k] = v
    # update based on kwargs
    for k, v in kwargs.items():
        # if overwriting a default, overwrite it (with a list so the
        # opts -> querystring list comp works)
        if k in opts or k in ('pos[]', 'draft_pos[]'):
            # if multiple values separated by commas, split em
            if isinstance(v, basestring):
                v = v.split(',')
            # otherwise, make sure it's a list
            elif not isinstance(v, collections.Iterable):
                v = [v]
            # then, add list of values to the querystring dict *opts*
            opts[k] = v
        if 'draft' in k:
            opts['draft'] = [1]
    opts['request'] = [1]
    opts['offset'] = [kwargs.get('offset', 0)]
    qs = '&'.join(
        '{}={}'.format(urllib.parse.quote_plus(name), val)
        for name, vals in sorted(opts.items()) for val in vals
    )
    return qs
def _Streamer__read_process(self, path, read_size, cbuf, stop, barrier, cyclic, offset, read_skip, sync):
    """
    Main function for the processes that read from the HDF5 file.

    :param self: A reference to the streamer object that created these processes.
    :param path: The HDF5 path to the node to be read from.
    :param read_size: The length of the block along the outer dimension to read.
    :param cbuf: The circular buffer to place read elements into.
    :param stop: The Event that signals the process to stop reading.
    :param barrier: The Barrier that synchonises read cycles.
    :param cyclic: True if the process should read cyclically.
    :param offset: Offset into the dataset that this process should start reading at.
    :param read_skip: How many element to skip on each iteration.
    :param sync: GuardSynchonizer to order writes to the buffer.
    :return: Nothing
    """
    # NOTE(review): presumably defined at module level (with the mangled
    # method name) so it can be pickled as a multiprocessing target with
    # `self` passed explicitly -- confirm against the enclosing class.
    # Multi-process access to HDF5 seems to behave better there are no top level imports of PyTables.
    import tables as tb
    h5_file = tb.open_file(self.filename, 'r', **self.h5_kw_args)
    ary = h5_file.get_node(path)
    i = offset
    while not stop.is_set():
        vals = ary[i:i + read_size]
        # If the read goes off the end of the dataset, then wrap to the start.
        if i + read_size > len(ary):
            vals = np.concatenate([vals, ary[0:read_size - len(vals)]])
        if sync is None:
            # If no ordering is requested, then just write to the next available space in the buffer.
            with cbuf.put_direct() as put_ary:
                put_ary[:] = vals
        else:
            # Otherwise, use the sync object to ensure that writes occur in the order provided by i.
            # So i = 0 will write first, then i = block_size, then i = 2*block_size, etc...
            # The sync object has two ordered barriers so that acquisition and release of the buffer spaces
            # are synchronized in order, but the actual writing to the buffer can happen simultaneously.
            # If only one barrier were used, writing to the buffer would be linearised.
            with sync.do(cbuf.put_direct(), i, (i+read_size) % len(ary)) as put_ary:
                put_ary[:] = vals
        i += read_skip
        if cyclic:
            # If the next iteration is past the end of the dataset, wrap it around.
            if i >= len(ary):
                i %= len(ary)
                # all readers re-align at the start of each pass over the data
                barrier.wait()
        else:
            # But if cyclic mode is disabled, break the loop as the work is now done.
            if i + read_size > len(ary):
                break
def wait(self):
    """Wait until all processes have reached the barrier."""
    # Classic condition-variable barrier: each arriving process increments
    # the shared counter, wakes any sleepers, and then sleeps until the
    # counter reaches the number of participating processes.
    with self.cvar:
        self.count.value += 1
        self.cvar.notify_all()
        while self.count.value < self.n_procs:
            self.cvar.wait()
def wait(self):
    """Wait until all processes have reached the barrier."""
    # Double-buffered barrier: alternate between two one-shot barriers so
    # that wait() can be called repeatedly in a cycle.
    self.barrier_A.wait()
    # The current barrier (barrier_A) is switched with the reserve barrier.
    # This is because the current barrier cannot be safely reset until the reserve barrier has been passed.
    self.barrier_A, self.barrier_B = self.barrier_B, self.barrier_A
    self.barrier_A.reset()
def wait(self, index, next_index=None):
    """
    Block until it is the turn indicated by index.

    :param index: the turn to wait for.
    :param next_index: Set the index to this value after finishing, which
        releases the process waiting on next_index. Defaults to index + 1.
    :return: a Guard context manager that enforces the ordering.
    """
    if next_index is None:
        next_index = index + 1
    return OrderedBarrier.Guard(self, index, next_index)
def do(self, guard, index, next_index):
    """
    Create a guard that requires the resource guard to be entered and exited
    based on the order provided by index.

    :param guard: The context manager for the resource.
    :param index: The order to wait for.
    :param next_index: The next index to release.
    :return: a GuardSynchronizer.Guard wrapping the resource guard.
    """
    ordered_guard = GuardSynchronizer.Guard(self, guard, index, next_index)
    return ordered_guard
def put(self, v):
    """
    Put an unsigned integer into the queue. This method always assumes that
    there is space in the queue (in the circular buffer, this is guaranteed
    by the implementation).

    :param v: The item to insert. Must be >= 0, as -2 is used to signal a
        queue close.
    :return:
    """
    if v is QueueClosed:
        v = -2          # sentinel value understood by get()
    else:
        assert v >= 0
    with self.cvar:
        assert self.size.value < len(self.vals)
        # the next free slot sits size elements past the tail, modulo capacity
        slot = (self.tail.value + self.size.value) % len(self.vals)
        self.vals[slot] = v
        self.size.value += 1
        self.cvar.notify()
def get(self):
    """
    Fetch the next item in the queue. Blocks until an item is ready.

    :return: The next unsigned integer in the queue.
    """
    with self.cvar:
        # sleep until at least one item has been queued
        while self.size.value <= 0:
            self.cvar.wait()
        value = self.vals[self.tail.value]
        self.tail.value = (self.tail.value + 1) % len(self.vals)
        self.size.value -= 1
        # -2 is the close sentinel written by put()
        if value == -2:
            return QueueClosed
        assert value >= 0
        return value
def put_direct(self):
    """
    Allows direct access to the buffer element.
    Blocks until there is room to write into the buffer.

    :return: A guard object that returns the buffer element.
    """
    # When the guard is released, the written index is handed to read_queue.
    guard = self.Guard(self.read_queue, self.arys, self.__put_idx)
    return guard
def get_direct(self):
    """
    Allows direct access to the buffer element.
    Blocks until there is data that can be read.

    :return: A guard object that returns the buffer element, or QueueClosed.
    """
    idx = self.__get_idx()
    if idx is QueueClosed:
        return QueueClosed
    # When the guard is released, idx is recycled through write_queue.
    return self.Guard(self.write_queue, self.arys, lambda: idx)
def close(self):
    """Close the queue, signalling that no more data can be put into the queue."""
    for queue in (self.read_queue, self.write_queue):
        queue.put(QueueClosed)
def __get_batch(self, path, length, last=False):
    """
    Get a block of data from the node at path.

    :param path: The path to the node to read from.
    :param length: The length along the outer dimension to read.
    :param last: True if the remainder elements should be read.
    :return: A copy of the requested block of data as a numpy array.
    :raises Exception: if the dataset at path is empty.
    """
    import tables
    h5_file = tables.open_file(self.filename, 'r')
    # try/finally ensures the file handle is released even on the
    # empty-dataset error path (previously it leaked).
    try:
        h5_node = h5_file.get_node(path)
        if len(h5_node) == 0:
            raise Exception("Cannot read from empty dataset.")
        # If the length isn't specified, then fall back to default values.
        if length is None:
            chunkshape = h5_node.chunkshape
            # If the array isn't chunked, then try to make the block close to 128KB.
            if chunkshape is None:
                default_length = 128*2**10//h5_node[0].nbytes  # Divides by one row of the dataset.
                length = min(h5_node.shape[0], default_length)
            # If it is chunked, then use the chunkshape for best performance.
            else:
                length = chunkshape[0]
        if last:
            # the trailing partial block that full blocks don't cover
            example = h5_node[length*(len(h5_node)//length):].copy()
        else:
            example = h5_node[:length].copy()
    finally:
        h5_file.close()
    return example
def get_remainder(self, path, block_size):
    """
    Get the remainder elements. These elements will not be read in the direct
    queue access cyclic=False mode.

    :param path: The HDF5 path to the dataset to be read.
    :param block_size: The block size is used to calculate which elements will remain.
    :return: A copy of the remainder elements as a numpy array.
    """
    # delegate to the batch reader, asking for the trailing partial block
    return self.__get_batch(path, length=block_size, last=True)
def get_queue(self, path, n_procs=4, read_ahead=None, cyclic=False, block_size=None, ordered=False):
    """
    Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the
    block_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it
    to the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements
    will not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps
    around the end and includes element from the beginning of the dataset. By default, blocks are returned in the
    order in which they become available. The ordered option will force blocks to be returned in on-disk order.

    :param path: The HDF5 path to the dataset that should be read.
    :param n_procs: The number of background processes used to read the datset in parallel.
    :param read_ahead: The number of blocks to allocate in the internal buffer.
    :param cyclic: True if the queue should wrap at the end of the dataset.
    :param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of
        the chunk size, or to a 128KB sized block if the dataset is not chunked.
    :param ordered: Force the reader return data in on-disk order. May result in performance penalty.
    :return: A queue object that allows access to the internal buffer.
    """
    # Get a block_size length of elements from the dataset to serve as a template for creating the buffer.
    # If block_size=None, then get_batch calculates an appropriate block size.
    example = self.__get_batch(path, block_size)
    block_size = example.shape[0]
    if read_ahead is None:
        # 2x No. of processes for writing, 1 extra for reading.
        read_ahead = 2*n_procs + 1
    cbuf = SharedCircBuf(read_ahead, example)
    stop = multiprocessing.Event()
    barrier = Barrier(n_procs)
    # If ordering has been requested, create a synchronizer.
    sync = GuardSynchronizer() if ordered else None
    procs = []
    for i in range(n_procs):
        # Each process is offset in the dataset by i*block_size
        # The skip length is set to n_procs*block_size so that no block is read by 2 processes.
        process = multiprocessing.Process(target=_Streamer__read_process, args=(
            self, path, block_size, cbuf, stop, barrier, cyclic,
            i * block_size, n_procs * block_size, sync
        ))
        # daemon processes do not block interpreter exit
        process.daemon = True
        process.start()
        procs.append(process)
    # If the queue is not cyclic, then the cessation of reading data needs to be monitored.
    if not cyclic:
        # This closure defines a background thread that waits until all processes have finished.
        # At this point, all data from the dataset has been read, and the buffer is closed.
        def monitor():
            for p in procs:
                p.join()
            cbuf.close()
        monitor_thread = threading.Thread(target=monitor)
        monitor_thread.daemon = True
        monitor_thread.start()
    return Streamer.Queue(cbuf, stop, block_size)
def get_generator(self, path, *args, **kw_args):
    """
    Get a generator that allows convenient access to the streamed data.
    Elements from the dataset are returned from the generator one row at a time.
    Unlike the direct access queue, this generator also returns the remainder elements.
    Additional arguments are forwarded to get_queue.
    See the get_queue method for documentation of these parameters.

    :param path: The HDF5 path to the dataset that should be read.
    :return: A generator that iterates over the rows in the dataset.
    """
    # NOTE(review): passing *args alongside the explicit path= keyword would
    # raise if a caller supplied path positionally in args — callers appear
    # to use keywords only; confirm.
    q = self.get_queue(path=path, *args, **kw_args)
    try:
        # This generator just implements a standard access pattern for the direct access queue.
        for guard in q.iter():
            with guard as batch:
                # copy the block: the buffer slot is recycled once the
                # guard is released, so rows must not alias it
                batch_copy = batch.copy()
            for row in batch_copy:
                yield row
        last_batch = self.get_remainder(path, q.block_size)
        for row in last_batch:
            yield row
    finally:
        # always release the queue's background readers
        q.close()
def parse(ifp, pb_cls, **kwargs):
    """Parse a stream.

    Args:
        ifp (string or file-like object): input stream.
        pb_cls (protobuf.message.Message.__class__): The class object of
            the protobuf message type encoded in the stream.

    Yields:
        pb_cls instances, one per encoded object in the stream.
    """
    mode = 'rb'
    # NOTE(review): `open` here must be a module-level stream-opening helper,
    # not the builtin — builtin open() has no `fileobj` parameter. Confirm
    # against the module's imports.
    if isinstance(ifp, str):
        istream = open(ifp, mode=mode, **kwargs)
    else:
        istream = open(fileobj=ifp, mode=mode, **kwargs)
    with istream:
        # iterating the stream yields one serialized message at a time
        for data in istream:
            pb_obj = pb_cls()
            pb_obj.ParseFromString(data)
            yield pb_obj
def dump(ofp, *pb_objs, **kwargs):
    """Write to a stream.

    Args:
        ofp (string or file-like object): output stream.
        pb_objs (*protobuf.message.Message): list of protobuf message objects
            to be written.
    """
    mode = 'wb'
    # NOTE(review): as in parse(), `open` must be the module's own helper;
    # builtin open() has no `fileobj` parameter. Confirm against imports.
    if isinstance(ofp, str):
        ostream = open(ofp, mode=mode, **kwargs)
    else:
        ostream = open(fileobj=ofp, mode=mode, **kwargs)
    with ostream:
        # the stream's write() accepts multiple messages as varargs
        ostream.write(*pb_objs)
def _read_varint(self):
    """Read a varint from file, parse it, and return the decoded integer.

    Returns 0 if the file is already at EOF.
    """
    data = self._fd.read(1)
    if data == b'':
        return 0
    # keep reading while the continuation bit (MSB) of the last byte is set
    while bytearray(data)[-1] & 0x80:
        next_byte = self._fd.read(1)
        if next_byte == b'':
            raise EOFError('unexpected EOF.')
        data += next_byte
    value, _ = decodeVarint(data, 0)
    return value
def _get_objs(self):
    """A generator yielding all protobuf object data in the file. It is the
    main parser of the stream encoding.
    """
    while True:
        group_count = self._read_varint()
        # a zero count marks the end of the stream
        if group_count == 0:
            break
        # Read a group containing `group_count` number of objects.
        for _ in range(group_count):
            obj_size = self._read_varint()
            if obj_size == 0:
                raise EOFError('unexpected EOF.')
            # Read an object from the object group.
            yield self._fd.read(obj_size)
        if self._group_delim:
            # emit a delimiter marker between groups
            if self._delimiter is not None:
                yield self._delimiter()
            else:
                yield None
def close(self):
    """Close the stream, flushing any buffered objects first."""
    self.flush()
    owned_fd = self._myfd
    # only close the descriptor if this stream owns it
    if owned_fd is not None:
        owned_fd.close()
        self._myfd = None
def write(self, *pb2_obj):
    """Write a group of one or more protobuf objects to the file. Multiple
    object groups can be written by calling this method several times
    before closing stream or exiting the runtime context.

    The input protobuf objects get buffered and will be written down when
    the number of buffered objects exceed the `self._buffer_size`.

    Args:
        pb2_obj (*protobuf.message.Message): list of protobuf messages.
    """
    base = len(self._write_buff)
    for offset, message in enumerate(pb2_obj):
        position = base + offset
        # flush whenever the buffer reaches a multiple of the buffer size
        if (self._buffer_size > 0 and position != 0
                and position % self._buffer_size == 0):
            self.flush()
        self._write_buff.append(message)
    # unbuffered mode: write through immediately
    if self._buffer_size == 0:
        self.flush()
def flush(self):
    """Write down buffer to the file."""
    if not self.is_output():
        return
    buffered = self._write_buff
    if not buffered:
        return
    # group header: varint count of objects in this group
    encodeVarint(self._fd.write, len(buffered), True)
    for message in buffered:
        payload = message.SerializeToString()
        # each object is length-prefixed with a varint
        encodeVarint(self._fd.write, len(payload), True)
        self._fd.write(payload)
    self._write_buff = []
def get_game_dir(self, username=False):
    """Returns joined game directory path relative to Steamapps.

    :param username: Steam account name; required for legacy (non-"common")
        games, whose files live under a per-account subdirectory.
    :raises RuntimeError: if the game is not in "common" and no username
        was supplied.
    """
    if not self.common and not username:
        raise RuntimeError("Can't determine this game's directory without username")
    if self.common:
        subdir = "common"
    else:
        # Legacy games live under SteamApps/<username>/. The original code
        # used the literal string "username" here, which could never match
        # a real account directory.
        subdir = username
    subsubdir = self.dir
    if WIN32 or CYGWIN:
        # Windows filesystems are case-insensitive; Steam lowercases dirs
        subsubdir = subsubdir.lower()
    return os.path.join(subdir, subsubdir)
def _get_MAP_spikes(F, c_hat, theta, dt, tol=1E-6, maxiter=100, verbosity=0):
    """
    Used internally by deconvolve to compute the maximum a posteriori
    spike train for a given set of fluorescence traces and model parameters.

    See the documentation for deconvolve for the meaning of the
    arguments.

    Interior-point scheme: an outer loop progressively reduces the weight z
    of the barrier term, an inner loop takes Newton steps for the current z,
    and an innermost backtracking line search picks the step size.
    S_TOL, S_FAC, D_TOL, Z_TOL, Z_FAC and DTYPE are module-level constants.

    Returns: n_hat_best, c_hat_best, LL_best
    """
    # F is assumed to be (npix, nt) — pixels by timebins; TODO confirm
    npix, nt = F.shape
    sigma, alpha, beta, lamb, gamma = theta
    # we project everything onto the alpha mask so that we only ever have to
    # deal with 1D vector norms
    alpha_ss = np.dot(alpha, alpha)
    c = np.dot(alpha, F) - np.dot(alpha, beta)
    # used for computing the LL and gradient
    scale_var = 1. / (2 * sigma * sigma)
    lD = lamb * dt
    # used for computing the gradient (M.T.dot(lamb * dt))
    grad_lnprior = np.zeros(nt, dtype=DTYPE)
    grad_lnprior[1:] = lD
    grad_lnprior[:-1] -= lD * gamma
    # initialize the weight of the barrier term to 1
    z = 1.
    # initial estimate of spike probabilities
    n_hat = c_hat[1:] - gamma * c_hat[:-1]
    # assert not np.any(n_hat < 0), "spike probabilities < 0"
    # (actual - predicted) fluorescence
    res = c - alpha_ss * c_hat
    # best overall posterior log-likelihood of the fluorescence
    LL_best = _post_LL(n_hat, res, scale_var, lD, z)
    LL_barrier = LL_best
    nloop1 = 0
    terminate_interior = False
    # in the outer loop we'll progressively reduce the weight of the barrier
    # term and check the interior point termination criteria
    while not terminate_interior:
        nloop2 = 0
        terminate_barrier = False
        # converge for this barrier weight
        while not terminate_barrier:
            # by projecting everything onto alpha, we reduce this to a 1D
            # vector norm
            res = c - alpha_ss * c_hat
            # compute direction of newton step
            d = _direction(n_hat, res, alpha_ss, gamma, scale_var,
                           grad_lnprior, z)
            terminate_linesearch = False
            # find the largest step we can take in direction d without
            # violating the non-negativity constraint on n_hat
            s_upper_bnd = -n_hat / (d[1:] - gamma * d[:-1])
            # we are only interested in positive step sizes
            feasible = (s_upper_bnd > 0)
            if np.any(feasible):
                # largest allowable step size is 1.
                s = min(1., 0.999 * np.min(s_upper_bnd[feasible]))
            else:
                # if there is no step size that will keep n_hat >= 0, just
                # reduce the barrier weight and try again
                terminate_linesearch = True
                terminate_barrier = True
                if verbosity >= 2:
                    print("skipping linesearch: no positive step size will "
                          "keep n_hat >= 0")
            nloop3 = 0
            # backtracking line search for the largest step size that increases
            # the posterior log-likelihood of the fluorescence
            while not terminate_linesearch:
                # update estimated calcium
                c_hat_line = c_hat + (s * d)
                # update spike probabilities
                n_hat_line = c_hat_line[1:] - gamma * c_hat_line[:-1]
                # assert not np.any(n_hat_line < 0), "spike probabilities < 0"
                # (actual - predicted) fluorescence
                res = c - alpha_ss * c_hat_line
                # compute the new posterior log-likelihood
                LL_line = _post_LL(n_hat_line, res, scale_var, lD, z)
                # assert not np.any(np.isnan(LL1)), "nan LL"
                if verbosity >= 2:
                    print('spikes: iter=%3i, %3i, %3i; z=%-10.4g; s=%-10.4g;'
                          ' LL=%-10.4g'
                          % (nloop1, nloop2, nloop3, z, s, LL_line))
                # if the step size gets too small without making any progress,
                # we terminate the linesearch and reduce the barrier weight
                if s < S_TOL:
                    if verbosity >= 2:
                        print('--> terminated linesearch: s < %.3g on %i '
                              'iterations' % (S_TOL, nloop3))
                    terminate_linesearch = True
                    terminate_barrier = True
                # only update c_hat & LL if LL improved
                if LL_line > LL_barrier:
                    LL_barrier, n_hat, c_hat = LL_line, n_hat_line, c_hat_line
                    terminate_linesearch = True
                # reduce the step size
                else:
                    s /= S_FAC
                nloop3 += 1
            # if d gets too small, reduce the barrier weight
            if (np.linalg.norm(d) < D_TOL):
                terminate_barrier = True
            nloop2 += 1
        # only test for convergence if we were actually able to enter the
        # linesearch
        if nloop3:
            # relative improvement in log-likelihood for this barrier weight
            delta_LL = -(LL_barrier - LL_best) / LL_best
            LL_best = LL_barrier
            if (delta_LL < tol):
                terminate_interior = True
        elif z < Z_TOL:
            if verbosity >= 2:
                print('MAP spike train failed to converge before z < %.3g'
                      % Z_TOL)
            terminate_interior = True
        elif nloop1 > maxiter:
            if verbosity >= 2:
                print('MAP spike train failed to converge within maxiter (%i)'
                      % maxiter)
            terminate_interior = True
        # increment the outer loop counter, reduce the barrier weight
        nloop1 += 1
        z /= Z_FAC
    return n_hat, c_hat, LL_best
def trisolve(dl, d, du, b, inplace=False):
    """
    The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems
    of equations:
        a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}
    in matrix form:
        Mx = b
    TDMA is O(n), whereas standard Gaussian elimination is O(n^3).

    Arguments:
    -----------
        dl: (n - 1,) vector
            the lower diagonal of M
        d: (n,) vector
            the main diagonal of M
        du: (n - 1,) vector
            the upper diagonal of M
        b: (n,) vector
            the result of Mx
        inplace:
            if True, and if d and b are both float64 vectors, they will be
            modified in place (may be faster)

    Returns:
    -----------
        x: (n,) vector
            the solution to Mx = b

    References:
    -----------
    http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
    http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
    """
    if (dl.shape[0] != du.shape[0] or (d.shape[0] != dl.shape[0] + 1)
            or d.shape[0] != b.shape[0]):
        raise ValueError('Invalid diagonal shapes')
    bshape_in = b.shape
    rtype = np.result_type(dl, d, du, b)
    if not inplace:
        # force a copy
        dl = np.array(dl, dtype=rtype, copy=True, order='F')
        d = np.array(d, dtype=rtype, copy=True, order='F')
        du = np.array(du, dtype=rtype, copy=True, order='F')
        b = np.array(b, dtype=rtype, copy=True, order='F')
    # this may also force copies if arrays have inconsistent types / incorrect
    # order
    # NOTE(review): when inplace=False the arrays were just copied above with
    # matching dtype/order, so this pass is a no-op for them; it only matters
    # on the inplace path.
    dl, d, du, b = (np.array(v, dtype=rtype, copy=False, order='F')
                    for v in (dl, d, du, b))
    # use the LAPACK implementation
    _lapack_trisolve(dl, d, du, b, rtype)
    # b now holds the solution; restore the caller's original shape
    return b.reshape(bshape_in)
def from_web_element(self, web_element):
    """
    Store reference to a WebElement instance representing the element on the DOM.

    Use it when an instance of WebElement has already been created (e.g. as
    the result of find_element) and you want to create a UIComponent out of
    it without evaluating it from the locator again.

    :param web_element: a selenium WebElement instance.
    :raises TypeError: if web_element is not a WebElement.
    :returns: self, so calls can be chained.
    """
    # isinstance already returns a bool; comparing `is not True` was redundant
    if not isinstance(web_element, WebElement):
        raise TypeError("web_element parameter is not of type WebElement.")
    self._web_element = web_element
    return self
def locate(self):
    """
    Lazily locates the element on the DOM if the WebElement instance is not
    available already, returning a WebElement object.

    It also caches the element if caching has been set through cache().
    """
    # a previously stored/cached element short-circuits the lookup
    if self._web_element:
        return self._web_element
    by, value = self.__locator
    found = self.driver.find_element(by=by, value=value)
    self._cache_web_element(found)  # cache the element if allowed
    return found
def input_text_with_keyboard_emulation(self, text):
    """
    Works around the problem of emulating user interactions with text inputs.

    Emulates a key-down action on the first char of the input. This way, implementations which
    require key-down event to trigger auto-suggest are testable.
    Then the chains sends the rest of the text and releases the key.
    """
    # NOTE(review): key_down() is given the whole `text` string rather than
    # its first character, and key_up(Keys.CONTROL) releases a key that was
    # never pressed here. This does not match the docstring's description —
    # confirm intended behavior against the ActionChains API.
    ActionChains(self.driver).key_down(text).key_up(Keys.CONTROL).perform()
def make_fake_movie(nframes, mask_shape=(64, 64), mask_center=None,
                    bg_intensity=0.1, mask_sigma=10, dt=0.02, rate=1.0,
                    tau=1., sigma=0.001, seed=None):
    """
    Generate a 2D fake fluorescence movie.

    Arguments:
    ---------------------------------------------------------------------------
        nframes: number of timebins to simulate
        mask_shape: tuple (nrows, ncols), shape of a single movie frame
        mask_center: tuple (x, y), pixel coords of cell center
        bg_intensity: scalar, amplitude of (static) baseline fluorescence
        mask_sigma: scalar, standard deviation of Gaussian mask
        dt: timestep (s)
        rate: mean spike rate (Hz)
        tau: time constant of decay in calcium concentration (s)
        sigma: SD of additive noise on fluorescence
        seed: Seed for RNG

    Returns:
    ---------------------------------------------------------------------------
        F: fluorescence [npixels, nframes]
        c: calcium concentration [nframes,]
        n: spike train [nframes,]
        theta: tuple of true model parameters:
            (sigma, alpha, beta, lambda, gamma)
    """
    rng = np.random.RandomState(seed)
    # spike counts per timebin are Poisson with mean rate*dt
    n = rng.poisson(rate * dt, size=nframes)
    # calcium follows first-order decay dynamics driven by the spikes
    gamma = np.exp(-dt / tau)
    c = signal.lfilter(np.r_[1], np.r_[1, -gamma], n, axis=0)
    nr, nc = mask_shape
    npix = nr * nc
    if mask_center is None:
        mask_center = (nc // 2., nr // 2.)
    cx, cy = mask_center
    y, x = np.ogrid[:nr, :nc]
    xs = (x - cx) ** 2.
    ys = (y - cy) ** 2.
    twoss = 2. * mask_sigma ** 2.
    # Gaussian spatial mask over the frame, normalized so weights sum to 1
    alpha = np.exp(-1 * ((xs / twoss) + (ys / twoss))).ravel()
    alpha /= alpha.sum()
    # static per-pixel background fluorescence
    beta = rng.randn(npix) * bg_intensity
    # firing rate (spike probability per sec)
    lamb = rate
    # spatially & temporally white additive noise
    epsilon = rng.randn(npix, nframes) * sigma
    # simulated fluorescence: outer product of calcium trace and mask
    F = c[None, :] * alpha[:, None] + beta[:, None] + epsilon
    theta = (sigma, alpha, beta, lamb, gamma)
    return F, c, n, theta
def evaluate_traits(self):
    """
    Evaluate all traits and return the descriptions of those whose
    condition is not met.

    When eager evaluation is disabled, evaluation stops at the first
    failing trait and only its description is returned; with eager
    evaluation enabled every trait is checked before returning (use
    that mode only for debugging purposes).
    """
    failed = []
    for trait in self.traits:
        if trait.condition():
            continue
        if not self.traits_eager_evaluation:
            # lazy mode: report the first failure and stop
            return [trait.description]
        failed.append(trait.description)
    return failed
def until_condition(self, condition, condition_description):
    """
    Poll ``condition`` until it returns a value other than False/None.

    ``condition`` must be a callable; it is polled every ``self._poll``
    seconds. Any return value that is not ``False`` and not ``None``
    (including falsy values such as 0 or "") is returned to the caller.
    Exception types listed in ``self._ignored_exceptions`` are swallowed
    and polling continues.

    Raises TypeError if ``condition`` is not callable, and a
    TimeoutException if no acceptable value is produced within
    ``self._timeout`` seconds.
    """
    if not callable(condition):
        # Fail fast, outside the try: the old in-loop check raised
        # TypeError inside the ignored-exceptions handler, which could
        # loop forever if TypeError happened to be ignored.
        raise TypeError("condition is not callable")
    end_time = time.time() + self._timeout
    count = 1
    while True:
        try:
            value = condition()
            # Original semantics preserved: anything except the exact
            # objects False and None counts as success.
            if value is not False and value is not None:
                return value
            logger.debug("#" + str(count) + " - wait until " + condition_description)  # pragma: no cover
        except self._ignored_exceptions as ex:
            logger.debug("Captured {0} : {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""),
                                                     str(ex)))  # pragma: no cover
        time.sleep(self._poll)
        count += 1
        if time.time() > end_time:  # pragma: no cover
            break
    raise TimeoutException(
        msg="condition <" + condition_description + "> was not true after " + str(self._timeout) + " seconds.")
def until_traits_are_present(self, element_with_traits):
    """
    Poll until every trait of ``element_with_traits`` is present.

    Returns True as soon as no missing traits remain. Exception types in
    ``self._ignored_exceptions`` are swallowed and polling continues.
    Raises a TimeoutException listing the still-missing traits if the
    timeout elapses first.
    """
    deadline = time.time() + self._timeout
    attempt = 1
    missing = []
    while True:
        missing = []
        try:
            missing = element_with_traits.evaluate_traits()
            if not missing:
                return True
            logger.debug("#{0} - wait until all traits are present: <{1}>".format(str(attempt), '> <'.join(
                missing)))
        except self._ignored_exceptions as ex:  # pragma: no cover
            logger.debug("Captured {0}: {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""),
                                                    str(ex)))  # pragma: no cover
        time.sleep(self._poll)
        attempt += 1
        if time.time() > deadline:
            break
    raise TimeoutException(
        msg="conditions " + '<' + '> <'.join(missing) + '>' + " not true after " + str(
            self._timeout) + " seconds.")
def with_ignored_exceptions(self, *ignored_exceptions):
    """
    Extend the tuple of exception types ignored inside the wait loop.

    Returns self so calls can be chained fluently.
    """
    self._ignored_exceptions = self._ignored_exceptions + tuple(ignored_exceptions)
    return self
def s2h(ss):
    """Convert seconds to a pretty "d hh:mm:ss.s" format.

    Hours are included whenever they are non-zero *or* a day prefix is
    present (previously e.g. 86430 s rendered as the ambiguous
    "1d 00:30.0", dropping the hour field); days appear only when
    non-zero.
    """
    mm, ss = divmod(ss, 60)
    hh, mm = divmod(mm, 60)
    dd, hh = divmod(hh, 24)
    tstr = "%02i:%04.1f" % (mm, ss)
    if hh > 0 or dd > 0:
        # always show the hour field when a day prefix will be emitted
        tstr = ("%02i:" % hh) + tstr
    if dd > 0:
        tstr = ("%id " % dd) + tstr
    return tstr
def exec_command(self, domain, function, operator, value=None):
    """
    Write a command to the receiver and return the value it reports.

    The receiver always returns a value, also when setting one. Returns
    the text after '=' in the reply, or None when the reply could not be
    parsed. Raises ValueError for an unsupported operator or when '=' is
    used without a value.
    """
    if operator not in CMDS[domain][function]['supported_operators']:
        raise ValueError('Invalid operator provided %s' % operator)
    # '==' instead of 'is': identity comparison of string literals is an
    # implementation detail and a SyntaxWarning on modern Pythons.
    if operator == '=' and value is None:
        raise ValueError('No value provided')
    if value is None:
        cmd = ''.join([CMDS[domain][function]['cmd'], operator])
    else:
        cmd = ''.join(
            [CMDS[domain][function]['cmd'], operator, str(value)])
    if not self.ser.is_open:
        self.ser.open()
    # 'with' guarantees the lock is released even if write/read raises
    with self.lock:
        self.ser.write(''.join(['\r', cmd, '\r']).encode('utf-8'))
        # not sure why, but otherwise it is not ready yet to do the read.
        time.sleep(0.1)
        msg = self.ser.read(self.ser.in_waiting)
        try:
            return msg.decode()[1:-1].split('=')[1]
        except IndexError:
            # malformed/empty reply: keep the best-effort behaviour
            return None
def main_volume(self, operator, value=None):
    """
    Execute Main.Volume on the receiver.

    Returns the volume as int, or None when the command failed or the
    reply was not numeric.
    """
    try:
        return int(self.exec_command('main', 'volume', operator, value))
    except (ValueError, TypeError):
        return None
def main_source(self, operator, value=None):
    """
    Execute Main.Source on the receiver.

    Returns the source as int, or None when the command failed or the
    reply was not numeric.
    """
    try:
        return int(self.exec_command('main', 'source', operator, value))
    except (ValueError, TypeError):
        return None
def _send(self, message, read_reply=False):
    """Send a hex command string to the amplifier.

    Retries the TCP connect up to 3 times. When ``read_reply`` is True,
    reads until a reply at least as long as the request arrives (bounded
    by a fixed number of attempts) and returns it hex-encoded; returns
    None on failure.
    """
    sock = None
    for tries in range(0, 3):
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self._host, self.PORT))
            break
        except (ConnectionError, BrokenPipeError):
            # close the failed socket before retrying
            if sock is not None:
                sock.close()
            sleep(0.1)
    else:
        # BUG FIX: the old code tested "tries == 3" inside the loop,
        # which never triggers for range(0, 3); total connect failure
        # then fell through to sock.send() on an unconnected socket.
        print("socket connect failed.")
        return
    sock.send(codecs.decode(message, 'hex_codec'))
    if read_reply:
        sleep(0.1)
        reply = ''
        tries = 0
        max_tries = 20
        while len(reply) < len(message) and tries < max_tries:
            try:
                reply += codecs.encode(sock.recv(self.BUFFERSIZE), 'hex')\
                    .decode("utf-8")
            except (ConnectionError, BrokenPipeError):
                pass
            tries += 1
        sock.close()
        if tries >= max_tries:
            return
        return reply
    sock.close()
def status(self):
    """
    Query the device and return its current state.

    Returns a dict with keys 'volume' (int 0-200), 'power' (bool),
    'muted' (bool) and 'source' (str), or None when polling failed.
    """
    reply = self._send(self.POLL_VOLUME +
                       self.POLL_POWER +
                       self.POLL_MUTED +
                       self.POLL_SOURCE, read_reply=True)
    if reply is None:
        return
    # each status record is 10 hex characters; the payload byte is last
    chunk = 10
    records = [reply[pos:pos + chunk]
               for pos in range(0, len(reply), chunk)]
    return {'volume': int(records[0][-2:], 16),
            'power': records[1][-2:] == '01',
            'muted': records[2][-2:] == '01',
            'source': self.SOURCES_REVERSED[records[3][-2:]]}
def power_off(self):
    """Power the device off (no-op when it is already off).

    Sending power-off while the device is already off can cause hangs,
    hence the status check first.
    """
    if self.status()['power']:
        self._send(self.CMD_POWERSAVE + self.CMD_OFF)
def power_on(self):
    """Power the device on (no-op when it is already on).

    After sending the command, pauses briefly so the device has time to
    come up before any follow-up command.
    """
    if not self.status()['power']:
        self._send(self.CMD_ON, read_reply=True)
        sleep(0.5)
def set_volume(self, volume):
    """Set the volume level. Accepts integers 0-200; out-of-range
    values are silently ignored."""
    if not 0 <= volume <= 200:
        return
    # the device expects the level as a two-digit hex string
    self._send(self.CMD_VOLUME + format(volume, "02x"))
def select_source(self, source):
    """Switch to ``source`` if it is known, differs from the current
    one, and the device is powered on.

    Changing source while off, or re-selecting the active source, can
    hang the NAD7050 -- hence the guards.
    """
    state = self.status()
    if (state['power'] and state['source'] != source
            and source in self.SOURCES):
        self._send(self.CMD_SOURCE + self.SOURCES[source], read_reply=True)
def exec_command(self, domain, function, operator, value=None):
    """
    Write a command to the receiver over telnet and return the value it
    reports.

    Raises ValueError for an unsupported operator or a missing value,
    and RuntimeError when the connection or a matching response fails.
    """
    if operator not in CMDS[domain][function]['supported_operators']:
        raise ValueError('Invalid operator provided %s' % operator)
    # '==' not 'is': identity comparison of str literals is unreliable
    # and a SyntaxWarning on modern Pythons.
    if operator == '=' and value is None:
        raise ValueError('No value provided')
    if value is None:
        cmd = ''.join([CMDS[domain][function]['cmd'], operator])
    else:
        cmd = ''.join(
            [CMDS[domain][function]['cmd'], operator, str(value)])
    if not self._open_connection():
        raise RuntimeError('Failed to open connection')
    # For telnet the first \r / \n is recommended only
    self.telnet.write((''.join(['\r', cmd, '\n']).encode()))
    # Could raise eg. socket.error, UnicodeError, let the client handle it
    # Tolerate up to 3 empty reads (roughly 3 x timeout) before giving up.
    attempts = 3
    while attempts:
        msg = self.telnet.read_until('\n'.encode(), self.timeout)
        # Could raise eg. EOFError, UnicodeError, let the client handle it
        # BUG FIX: read_until returns bytes, so the old comparison
        # msg == "" never matched; an empty buffer then looped forever
        # because the counter was never decremented.
        if not msg:
            attempts -= 1
            continue
        msg = msg.decode().strip('\r\n')
        # Wait for the response that equals the requested domain.function;
        # e.g. b'Main.Volume=-12\r' yields '-12'.
        if msg.strip().split('=')[0].lower() == '.'.join([domain, function]).lower():
            return msg.strip().split('=')[1]
    raise RuntimeError('Failed to read response')
def deobfuscate(request, key, juice=None):
    """
    Decrypt an obfuscated URL and return the HttpResponse produced by
    the underlying view.

    SEO juice is mostly ignored as it is intended for display purposes
    only (it is reused as a friendly download filename). Returns 404
    when the key fails to decrypt, decode or resolve.
    """
    try:
        url = decrypt(str(key),
                      settings.UNFRIENDLY_SECRET,
                      settings.UNFRIENDLY_IV,
                      checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM)
    except (CheckSumError, InvalidKeyError):
        return HttpResponseNotFound()
    try:
        url = url.decode('utf-8')
    except UnicodeDecodeError:
        return HttpResponseNotFound()
    parts = urlparse(unquote(url))
    try:
        view, args, kwargs = resolve(parts.path)
    except Resolver404:
        return HttpResponseNotFound()
    # rebuild the WSGI environ so the downstream view sees the real path
    environ = request.environ.copy()
    environ['PATH_INFO'] = parts.path[len(environ['SCRIPT_NAME']):]
    environ['QUERY_STRING'] = parts.query
    # init a new request
    patched_request = request.__class__(environ)
    # copy over any missing request attributes - this feels hackish
    for attr in set(dir(request)) - set(dir(patched_request)):
        setattr(patched_request, attr, getattr(request, attr))
    # mark this request as obfuscated
    patched_request.META['obfuscated'] = True
    response = view(patched_request, *args, **kwargs)
    # offer up a friendlier juice-powered filename if downloaded
    if juice and not response.has_header('Content-Disposition'):
        response['Content-Disposition'] = 'inline; filename=%s' % juice
    return response
def _lazysecret(secret, blocksize=32, padding='}'):
"""Pads secret if not legal AES block size (16, 24, 32)"""
if not len(secret) in (16, 24, 32):
return secret + (blocksize - len(secret)) * padding
return secret |
def _crc(plaintext):
    """Generate a crc32 checksum; the modulo keeps the value within the
    positive int range."""
    data = plaintext if isinstance(plaintext, six.binary_type) else six.b(plaintext)
    return (zlib.crc32(data) % 2147483647) & 0xffffffff
def encrypt(plaintext, secret, inital_vector, checksum=True, lazy=True):
    """Encrypts plaintext with secret

    plaintext - content to encrypt (str is coerced to bytes first)
    secret - secret to encrypt plaintext
    inital_vector - initial vector
    lazy - pad secret if less than legal blocksize (default: True)
    checksum - attach crc32 byte encoded (default: True)
    returns urlsafe-base64 ciphertext with its '=' padding stripped
    """
    if not isinstance(plaintext, six.binary_type):
        plaintext = six.b(plaintext)
    secret = _lazysecret(secret) if lazy else secret
    encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
    if checksum:
        # append the b64-encoded crc so decrypt() can verify integrity
        plaintext += base64.urlsafe_b64encode(_pack_crc(plaintext))
    encoded = base64.urlsafe_b64encode(encobj.encrypt(plaintext))
    # plaintext is guaranteed binary at this point, so the previous
    # isinstance() guard around decode() was always true -- removed.
    return encoded.decode().replace('=', '')
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
    """Decrypts ciphertext with secret

    ciphertext - encrypted content to decrypt
    secret - secret to decrypt ciphertext
    inital_vector - initial vector
    lazy - pad secret if less than legal blocksize (default: True)
    checksum - verify crc32 byte encoded checksum (default: True)
    returns plaintext
    """
    secret = _lazysecret(secret) if lazy else secret
    encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
    try:
        # BUG FIX: restore the '=' padding that encrypt() stripped. The
        # amount needed is (-len) % 4; the old "len % 4" expression added
        # three characters instead of one when len % 4 == 3, which made
        # urlsafe_b64decode fail on perfectly valid keys.
        padded = ciphertext + ('=' * (-len(ciphertext) % 4))
        decoded = base64.urlsafe_b64decode(str(padded))
        plaintext = encobj.decrypt(decoded)
    except (TypeError, binascii.Error):
        raise InvalidKeyError("invalid key")
    if checksum:
        try:
            crc, plaintext = (base64.urlsafe_b64decode(
                plaintext[-8:]), plaintext[:-8])
        except (TypeError, binascii.Error):
            raise CheckSumError("checksum mismatch")
        if crc != _pack_crc(plaintext):
            raise CheckSumError("checksum mismatch")
    return plaintext
def obfuscate(value, juice=None):
    """
    Template filter that obfuscates whatever text it is applied to. The text is
    supposed to be a URL, but it will obfuscate anything.
    Usage:
        Extremely unfriendly URL:
            {{ "/my-secret-path/"|obfuscate }}
        Include some SEO juice:
            {{ "/my-secret-path/"|obfuscate:"some SEO friendly text" }}
    """
    if not settings.UNFRIENDLY_ENABLE_FILTER:
        return value
    key = encrypt(value,
                  settings.UNFRIENDLY_SECRET,
                  settings.UNFRIENDLY_IV,
                  checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM)
    kwargs = {'key': key}
    if juice:
        kwargs['juice'] = slugify(juice)
    return reverse('unfriendly-deobfuscate', kwargs=kwargs)
def missing_schema(self,html,song_name):
'''
It will print the list of songs that can be downloaded
'''
#html=self.get_html_response(url)
soup=BeautifulSoup(html)
name=' '.join(song_name)
print '%s not found'%name
print "But you can download any of the following songs :"
a_list=soup.findAll('a','touch')
for x in xrange(len(a_list)-1):
r=a_list[x]
p=str(r)
q=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',p)
print q |
def list_of_all_href(self, html):
    '''
    Return [link, name] pairs for every download hyperlink found on the
    mr-jatt page.
    '''
    soup = BeautifulSoup(html)
    markup = re.compile(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>')
    leading_number = re.compile(r'^[0-9]+\.')
    pairs = []
    anchors = soup.findAll('a', 'touch')
    for anchor in anchors[:-1]:
        # strip markup and the "NN." prefix to get a clean title
        title = markup.sub('', str(anchor))
        title = leading_number.sub('', title)
        pairs.append([anchor.get('href'), title])
    return pairs
def check_if_song_name(self, html):
    '''
    Return (True, 'nothing') when the page belongs to an artist/movie
    listing, otherwise (False, href) with the best-quality download
    link found on the page.
    '''
    soup = BeautifulSoup(html)
    anchors = soup.findAll('a', 'touch')
    page_text = ''.join(str(a) for a in anchors).lower()
    # prefer the highest bitrate available on the page
    quality_order = (('download in 320 kbps', 2),
                     ('download in 128 kbps', 1),
                     ('download in 48 kbps', 0))
    for marker, index in quality_order:
        if marker in page_text:
            return (False, anchors[index].get('href'))
    return (True, 'nothing')
def Parse(self,url,song_name,flag):
    '''
    Parse the mr-jatt page at ``url``.

    flag == False: ``song_name`` is a concrete song -- return the href of
    the best-quality download (320 > 128 > 48 kbps), or print the
    available alternatives and exit when nothing matches.
    flag == True: ``song_name`` may be an artist/movie -- return the list
    of [link, name] pairs to choose from, or download the matched song
    directly and exit.
    '''
    file_download=FileDownload()
    html=file_download.get_html_response(url)
    if flag == False:
        soup=BeautifulSoup(html)
        a_list=soup.findAll('a','touch')
        #print a_list
        # join all anchor markup so the quality markers can be searched
        text=[str(x) for x in a_list]
        text=''.join(text)
        text=text.lower()
        string1='download in 48 kbps'
        string2='download in 128 kbps'
        string3='download in 320 kbps'
        href=''
        # prefer the highest bitrate available on the page
        if string3 in text:
            print 'Downloading in 320 kbps'
            href=a_list[2].get('href')
        elif string2 in text:
            print 'Downloading in 128 kbps'
            href=a_list[1].get('href')
        elif string1 in text:
            print 'Downloading in 48 kbps'
            href=a_list[0].get('href')
        else:
            # song not found: show the alternatives and stop the program
            self.missing_schema(html,song_name)
            quit()
        return href
    else:
        x,href=self.check_if_song_name(html)
        links = []
        if x==True:
            # artist/movie page: return all candidate songs
            links=self.list_of_all_href(html)
        else:
            # direct song hit: download it and stop the program
            file_download=FileDownload()
            file_download.file_download_cross_platform(href)
            quit()
        return links
def google_url(self, song_name, website):
    '''Return the google search URL for ``song_name`` on ``website``.'''
    name = '+'.join(song_name)
    prefix = 'https://www.google.co.in/search?q='
    suffix = '+'.join(website.split(" "))
    # BUG FIX: a '+' separator was missing between the song name and the
    # site name, fusing the last song word and the first site word into
    # a single search token.
    return prefix + name + '+' + suffix
def parse_google(self, html):
    '''Parse the google results page html and return the first result
    URL, with google's '/url?q=' redirect prefix and tracking query
    parameters removed.
    '''
    soup = BeautifulSoup(html)
    first_link = soup.find('div', 'g').find('a').get('href')
    # keep only the target before google's '&...' tracking parameters
    target = first_link.split('&')[0]
    return target.strip().replace('/url?q=', '')
def Parse(self, song_name, website):
    '''
    song_name is a list of strings; website is a string.
    Return the URL from which the music file needs to be downloaded.
    '''
    downloader = FileDownload()
    search_url = self.google_url(song_name, website)
    html = downloader.get_html_response(search_url)
    return self.parse_google(html)
def get_html_response(self,url):
'''It will download the html page specified by url and return the html response '''
print "Downloading page %s .."%url
try:
response=requests.get(url,timeout=50)
except requests.exceptions.SSLError:
try:
response=requests.get(url,verify=False,timeout=50)
except requests.exceptions.RequestException as e:
print e
quit()
except requests.exceptions.RequestException as e:
print e
quit()
return response.content |
def file_download_using_requests(self,url):
'''It will download file specified by url using requests module'''
file_name=url.split('/')[-1]
if os.path.exists(os.path.join(os.getcwd(),file_name)):
print 'File already exists'
return
#print 'Downloading file %s '%file_name
#print 'Downloading from %s'%url
try:
r=requests.get(url,stream=True,timeout=200)
except requests.exceptions.SSLError:
try:
response=requests.get(url,stream=True,verify=False,timeout=200)
except requests.exceptions.RequestException as e:
print e
quit()
except requests.exceptions.RequestException as e:
print e
quit()
chunk_size = 1024
total_size = int(r.headers['Content-Length'])
total_chunks = total_size/chunk_size
file_iterable = r.iter_content(chunk_size = chunk_size)
tqdm_iter = tqdm(iterable = file_iterable,total = total_chunks,unit = 'KB',
leave = False
)
with open(file_name,'wb') as f:
for data in tqdm_iter:
f.write(data)
#total_size=float(r.headers['Content-Length'])/(1024*1024)
'''print 'Total size of file to be downloaded %.2f MB '%total_size
total_downloaded_size=0.0
with open(file_name,'wb') as f:
for chunk in r.iter_content(chunk_size=1*1024*1024):
if chunk:
size_of_chunk=float(len(chunk))/(1024*1024)
total_downloaded_size+=size_of_chunk
print '{0:.0%} Downloaded'.format(total_downloaded_size/total_size)
f.write(chunk)'''
print 'Downloaded file %s '%file_name |
def file_download_using_wget(self,url):
'''It will download file specified by url using wget utility of linux '''
file_name=url.split('/')[-1]
print 'Downloading file %s '%file_name
command='wget -c --read-timeout=50 --tries=3 -q --show-progress --no-check-certificate '
url='"'+url+'"'
command=command+url
os.system(command) |
def main():
"""Main CLI entrypoint."""
#print VERSION
from commands.download import Download
options = docopt(__doc__, version=VERSION)
#print "You reached here"
#print options
print "working."
p=Download(options)
p.run() |
def ReadingBloomFilter(filename, want_lock=False):
    """
    Open a read-only bloom filter backed by ``filename``, using the
    (num_elements, max_fp_prob) specification stored in the companion
    ``<filename>.desc`` descriptor file.
    """
    with open('{}.desc'.format(filename), 'r') as descriptor:
        num_elements = int(descriptor.readline())
        max_fp_prob = float(descriptor.readline())
        ignore_case = int(descriptor.readline())
    return _hydra.BloomFilter.getFilter(
        num_elements, max_fp_prob,
        filename=filename, ignore_case=ignore_case,
        read_only=True, want_lock=want_lock)
def WritingBloomFilter(num_elements, max_fp_prob, filename=None,
                       ignore_case=False, want_lock=False,
                       fdatasync_on_close=True):
    """
    Create a read/write bloom filter with an upperbound of
    (num_elements, max_fp_prob) as a specification, optionally backed by
    ``filename``. The specification is mirrored into ``<filename>.desc``
    so the filter can later be reopened read-only.
    """
    bloom = _hydra.BloomFilter.getFilter(
        num_elements, max_fp_prob,
        filename=filename, ignore_case=ignore_case,
        read_only=False, want_lock=want_lock,
        fdatasync_on_close=fdatasync_on_close)
    if filename:
        with open('{}.desc'.format(filename), 'w') as descriptor:
            descriptor.write("{}\n".format(num_elements))
            descriptor.write("{:0.8f}\n".format(max_fp_prob))
            descriptor.write("{:d}\n".format(ignore_case))
    return bloom
def findStationCodesByCity(city_name, token):
    """Look up the AQI database for station codes in a given city.

    Returns a list of station uids, or [] on any API failure.
    """
    req = requests.get(
        API_ENDPOINT_SEARCH,
        params={
            'token': token,
            'keyword': city_name
        })
    if req.status_code != 200:
        return []
    # decode the response body once instead of re-parsing it per access
    body = req.json()
    if body["status"] != "ok":
        return []
    return [result["uid"] for result in body["data"]]
def get_location_observation(lat, lng, token):
    """Look up observations by geo coordinates.

    Returns the parsed observation dict, or {} on any API failure.
    """
    req = requests.get(
        API_ENDPOINT_GEO % (lat, lng),
        params={
            'token': token
        })
    if req.status_code == 200:
        # decode the response body once instead of calling json() twice
        body = req.json()
        if body["status"] == "ok":
            return parse_observation_response(body["data"])
    return {}
def parse_observation_response(json):
    """Decode an AQICN observation response JSON dict into a flat
    python dict; missing city/dominentpol default to ''."""
    logging.debug(json)
    pollutants = json['iaqi']
    return {
        'idx': json['idx'],
        'city': json.get('city', ''),
        'aqi': json['aqi'],
        'dominentpol': json.get("dominentpol", ''),
        'time': json['time']['s'],
        'iaqi': [{'p': name, 'v': pollutants[name]['v']}
                 for name in pollutants],
    }
def get_station_observation(station_code, token):
    """Request station data for a specific station identified by code.

    A language parameter can also be specified to translate location
    information (default: "en").
    Returns the parsed observation dict, or {} on any API failure.
    """
    req = requests.get(
        API_ENDPOINT_OBS % (station_code),
        params={
            'token': token
        })
    if req.status_code == 200:
        # decode the response body once instead of calling json() twice
        body = req.json()
        if body['status'] == "ok":
            return parse_observation_response(body['data'])
    return {}
def search_paths(self):
    """The list of logical paths which are used to search for an asset.

    This property makes sense only if the attributes were created with a
    logical path. The logical path may also refer to a directory that
    contains a file named ``index`` with the same suffix.
    Example::
        >>> attrs = AssetAttributes(environment, 'js/app.js')
        >>> attrs.search_paths
        ['js/app.js', 'js/app/index.js']
        >>> attrs = AssetAttributes(environment, 'js/app/index.js')
        >>> attrs.search_paths
        ['js/app/index.js']
    """
    candidates = [self.path]
    # a path that already ends in .../index must not get a second level
    if os.path.basename(self.path_without_suffix) != 'index':
        index_path = os.path.join(self.path_without_suffix, 'index')
        candidates.append(index_path + ''.join(self.suffix))
    return candidates
def path_without_suffix(self):
    """The relative path to the asset with its suffix stripped.
    Example::
        >>> attrs = AssetAttributes(environment, 'js/app.js')
        >>> attrs.path_without_suffix
        'js/app'
    """
    if not self.suffix:
        return self.path
    return self.path[:-len(''.join(self.suffix))]
def logical_path(self):
    """The logical path to the asset.
    Example::
        >>> attrs = AssetAttributes(environment, 'js/models.js.coffee')
        >>> attrs.logical_path
        'js/models.js'
    """
    # fall back to the extension implied by the compilers
    ext = self.format_extension or self.compiler_format_extension
    return self.path if ext is None else self.path_without_suffix + ext
def extensions(self):
    """The list of the asset's extensions.
    Example::
        >>> attrs = AssetAttributes(environment, 'js/models.js.coffee')
        >>> attrs.extensions
        ['.js', '.coffee']
        >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee')
        >>> attrs.extensions
        ['.min', '.js', '.coffee']
    """
    filename = os.path.basename(self.path)
    return re.findall(r'\.[^.]+', filename)
def format_extension(self):
    """The format extension of the asset: the right-most extension that
    has a registered MIME type but no compiler.
    Example::
        >>> attrs = AssetAttributes(environment, 'js/models.js.coffee')
        >>> attrs.format_extension
        '.js'
        >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee')
        >>> attrs.format_extension
        '.js'
    """
    for ext in reversed(self.extensions):
        is_compiler = self.environment.compilers.get(ext)
        if not is_compiler and self.environment.mimetypes.get(ext):
            return ext
def unknown_extensions(self):
    """The leading extensions that are neither compilers nor registered
    MIME types -- i.e. parts of the asset's file name. Example::
        >>> attrs = AssetAttributes(environment, 'js/lib-2.0.min.js')
        >>> attrs.unknown_extensions
        ['.0', '.min']
    """
    unknown = []
    for ext in self.extensions:
        known = self.environment.compilers.get(ext) or self.environment.mimetypes.get(ext)
        if known:
            # stop at the first recognized extension
            break
        unknown.append(ext)
    return unknown
def compiler_extensions(self):
    """The extensions for which a compiler is registered, taken from the
    format extension onwards.
    Example::
        >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee')
        >>> attrs.compiler_extensions
        ['.coffee']
    """
    try:
        start = self.extensions.index(self.format_extension)
    except ValueError:
        start = 0
    registry = self.environment.compilers
    return [ext for ext in self.extensions[start:] if registry.get(ext)]
def compilers(self):
    """The compilers used to build the asset, in extension order."""
    registry = self.environment.compilers
    return [registry.get(ext) for ext in self.compiler_extensions]
def processors(self):
    """All processors (preprocessors, compilers in reverse order,
    postprocessors) used to build the asset.
    """
    pipeline = list(self.preprocessors)
    pipeline.extend(reversed(self.compilers))
    pipeline.extend(self.postprocessors)
    return pipeline
def mimetype(self):
    """MIME type of the asset, falling back to the compilers' implicit
    type and finally to application/octet-stream."""
    explicit = self.environment.mimetypes.get(self.format_extension)
    if explicit:
        return explicit
    return self.compiler_mimetype or 'application/octet-stream'
def compiler_mimetype(self):
    """Implicit MIME type of the asset as declared by its compilers,
    checked from the last compiler backwards; None when no compiler
    declares one."""
    for compiler in reversed(self.compilers):
        declared = compiler.result_mimetype
        if declared:
            return declared
    return None
def compiler_format_extension(self):
    """The extension registered for the compilers' implicit MIME type,
    or None when no registered MIME type matches."""
    target = self.compiler_mimetype
    for extension, mimetype in self.environment.mimetypes.items():
        if mimetype == target:
            return extension
    return None
def register(self, mimetype, processor):
    """Register ``processor`` for ``mimetype``, ignoring duplicates."""
    handlers = self.setdefault(mimetype, [])
    if processor not in handlers:
        handlers.append(processor)
def unregister(self, mimetype, processor):
    """Remove ``processor`` for ``mimetype``. Nothing happens when the
    processor was never registered for that MIME type."""
    handlers = self.get(mimetype)
    if handlers and processor in handlers:
        handlers.remove(processor)
def register_defaults(self):
    """Register :class:`~gears.processors.DirectivesProcessor` as a
    preprocessor for the `text/css` and `application/javascript`
    MIME types.
    """
    for mimetype in ('text/css', 'application/javascript'):
        self.register(mimetype, DirectivesProcessor.as_handler())
def suffixes(self):
    """The registry of supported asset suffixes, built lazily from the
    MIME type and compiler registries and cached on first access.
    See :class:`~gears.environment.Suffixes` for more information.
    """
    if not hasattr(self, '_suffixes'):
        registry = Suffixes()
        for extension, mimetype in self.mimetypes.items():
            registry.register(extension, root=True, mimetype=mimetype)
        for extension, compiler in self.compilers.items():
            registry.register(extension, to=compiler.result_mimetype)
        self._suffixes = registry
    return self._suffixes
def paths(self):
    """The list of search paths, collected (and cached) from every
    registered finder that exposes a ``paths`` attribute. Can be useful
    for compilers to resolve internal dependencies.
    """
    if not hasattr(self, '_paths'):
        collected = []
        for finder in self.finders:
            # finders without a 'paths' attribute contribute nothing
            collected.extend(getattr(finder, 'paths', ()))
        self._paths = collected
    return self._paths
def register_defaults(self):
    """Register the default MIME types, preprocessors and
    postprocessors."""
    for registry in (self.mimetypes, self.preprocessors, self.postprocessors):
        registry.register_defaults()
def register_entry_points(self, exclude=()):
    """Allow Gears plugins to inject themselves into the environment.

    Every ``gears``/``register`` entry point is loaded and called with
    this environment. For example, a plugin package declaring::
        entry_points = {
            'gears': [
                'register = gears_plugin:register',
            ],
        }
    would have its function called like so::
        def register(environment):
            assets_dir = os.path.join(os.path.dirname(__file__), 'assets')
            assets_dir = os.path.absolute_path(assets_dir)
            environment.register(FileSystemFinder([assets_dir]))
    Plugins whose package is listed in ``exclude`` are skipped::
        environment.register_entry_points(exclude=['plugin'])
    """
    for entry_point in iter_entry_points('gears', 'register'):
        if entry_point.module_name in exclude:
            continue
        entry_point.load()(self)
def find(self, item, logical=False):
    """Find files using :attr:`finders` registry. The ``item`` parameter
    can be an instance of :class:`~gears.asset_attributes.AssetAttributes`
    class, a path to the asset or a logical path to the asset. If ``item``
    is a logical path, `logical` parameter must be set to ``True``.
    Returns a tuple with :class:`~gears.asset_attributes.AssetAttributes`
    instance for found file path as first item, and absolute path to this
    file as second item.
    If nothing is found, :class:`gears.exceptions.FileNotFound` exception
    is raised.
    """
    if isinstance(item, AssetAttributes):
        # try every candidate path (the path itself, then 'path/index.*')
        for path in item.search_paths:
            try:
                return self.find(path, logical)
            except FileNotFound:
                continue
        raise FileNotFound(item.path)
    if logical:
        asset_attributes = AssetAttributes(self, item)
        suffixes = self.suffixes.find(asset_attributes.mimetype)
        if not suffixes:
            # no known suffixes for this MIME type: fall back to a plain
            # (non-logical) lookup of the path as given
            return self.find(item)
        path = asset_attributes.path_without_suffix
        for suffix in suffixes:
            try:
                return self.find(path + suffix)
            except FileNotFound:
                continue
        # NOTE: falls through to the final raise when no suffix matched
    else:
        # ask each finder in registration order; first hit wins
        for finder in self.finders:
            try:
                absolute_path = finder.find(item)
            except FileNotFound:
                continue
            return AssetAttributes(self, item), absolute_path
    raise FileNotFound(item)
def list(self, path, mimetype=None):
    """Yield two-tuples for all files found in the directory given by
    ``path`` parameter. Result can be filtered by the second parameter,
    ``mimetype``, that must be a MIME type of assets compiled source code.
    Each tuple has :class:`~gears.asset_attributes.AssetAttributes`
    instance for found file path as first item, and absolute path to this
    file as second item.
    Usage example::
        # Yield all files from 'js/templates' directory.
        environment.list('js/templates/*')
        # Yield only files that are in 'js/templates' directory and have
        # 'application/javascript' MIME type of compiled source code.
        environment.list('js/templates/*', mimetype='application/javascript')
    """
    basename_pattern = os.path.basename(path)
    if path.endswith('**'):
        paths = [path]
    else:
        # also consider 'path/index.*' variants via AssetAttributes
        paths = AssetAttributes(self, path).search_paths
    # ensure every candidate ends in a glob wildcard
    paths = map(lambda p: p if p.endswith('*') else p + '*', paths)
    # deduplicate by logical path, keeping the first finder's hit
    results = unique(self._list_paths(paths), lambda x: x[0])
    for logical_path, absolute_path in results:
        asset_attributes = AssetAttributes(self, logical_path)
        if mimetype is not None and asset_attributes.mimetype != mimetype:
            continue
        basename = os.path.basename(asset_attributes.path_without_suffix)
        # 'index' files represent their directory, so they always match
        if not fnmatch(basename, basename_pattern) and basename != 'index':
            continue
        yield asset_attributes, absolute_path
def save(self):
    """Save every public handled asset below :attr:`root`, writing a
    fingerprinted copy and a manifest entry when fingerprinting is on."""
    for asset_attributes, absolute_path in self.list('**'):
        logical_path = os.path.normpath(asset_attributes.logical_path)
        # cheap check-mode pass first; build the full asset only if public
        if not build_asset(self, logical_path, check=True).is_public:
            continue
        asset = build_asset(self, logical_path)
        source = bytes(asset)
        self.save_file(logical_path, source, asset.gzippable)
        if self.fingerprinting:
            self.save_file(asset.hexdigest_path, source, asset.gzippable)
            self.manifest.files[logical_path] = asset.hexdigest_path
    self.manifest.dump()
def PopulateForm(self):
    """
    Build the settings UI: a horizontal splitter holding the list of
    plugin names on the left and the per-plugin IdaSettingsView on the
    right.
    +-----------------------------------------------------------------------+
    | +--- splitter ------------------------------------------------------+ |
    | | +-- list widget--------------+  +- IdaSettingsView -------------+ | |
    | | |                            |  |                               | | |
    | | | - plugin name              |  |                               | | |
    | | | - plugin name              |  |                               | | |
    | | | - plugin name              |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | |                            |  |                               | | |
    | | +----------------------------+  +-------------------------------+ | |
    | +-------------------------------------------------------------------+ |
    +-----------------------------------------------------------------------+
    """
    hbox = QtWidgets.QHBoxLayout(self.parent)
    self._splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
    self._plugin_list = QtWidgets.QListWidget()
    # collect plugin names across every settings scope, deduplicated
    plugin_names = set([])
    for scope, fn in (("idb", ida_settings.IDASettings.get_idb_plugin_names),
                      ("directory", ida_settings.IDASettings.get_directory_plugin_names),
                      ("user", ida_settings.IDASettings.get_user_plugin_names),
                      ("system", ida_settings.IDASettings.get_system_plugin_names)):
        for plugin_name in fn():
            plugin_names.add(plugin_name)
    for plugin_name in plugin_names:
        self._plugin_list.addItem(plugin_name)
    self._splitter.addWidget(self._plugin_list)
    hbox.addWidget(self._splitter)
    self.parent.setLayout(hbox)
    # refresh the right-hand settings view when the selection changes
    self._plugin_list.currentItemChanged.connect(self._handle_plugin_changed)
def as_handler(cls, **initkwargs):
    """Turn this processor class into a plain handler function.

    The returned callable instantiates the class with ``initkwargs`` and
    delegates each call to that instance, so it can be registered in a
    :class:`~gears.environment.Environment` wherever a bare function is
    expected.  All arguments given to :meth:`as_handler` are forwarded to
    the class constructor.
    """
    @wraps(cls, updated=())
    def handler(asset, *args, **kwargs):
        processor = handler.handler_class(**initkwargs)
        return processor(asset, *args, **kwargs)
    # expose the wrapped class and its check-mode capability on the function
    handler.handler_class = cls
    handler.supports_check_mode = cls.supports_check_mode
    return handler
def run(self, input):
    """Feed ``input`` to :attr:`executable` on stdin and return its stdout.

    The input is UTF-8 encoded before being written and the output is
    UTF-8 decoded.  Raises :class:`AssetHandlerError` carrying the
    captured stderr when the process exits with a non-zero status.
    """
    process = self.get_process()
    stdout, stderr = process.communicate(input=input.encode('utf-8'))
    if process.returncode:
        raise AssetHandlerError(stderr)
    return stdout.decode('utf-8')
def get_process(self):
    """Spawn the handler's subprocess.

    Builds the argument vector via :meth:`get_args` and returns a
    :class:`subprocess.Popen` whose stdin, stdout and stderr are all pipes,
    ready for :meth:`run` to communicate with.
    """
    args = self.get_args()
    return Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
def import_qtcore():
    """Locate and return the QtCore module.

    When running under IDA, IDA's bundled Qt bindings must be used:
    Python would otherwise try to load PySide from site-packages, which
    does not play nicely with IDA.  IDA's own python directory is
    therefore pushed to the front of ``sys.path`` for the import.
    Outside IDA, whichever of PyQt5/PySide is installed is used.

    Raises ImportError if no Qt binding can be found.
    via: github.com/tmr232/Cute
    """
    try:
        import idaapi
    except ImportError:
        # not running under IDA, so use the default Qt installation
        idaapi = None

    if idaapi is not None:
        # force IDA's bundled bindings to win the import lookup
        saved_path = sys.path[:]
        try:
            sys.path.insert(0, os.path.dirname(idaapi.__file__))
            if idaapi.IDA_SDK_VERSION >= 690:
                from PyQt5 import QtCore
            else:
                from PySide import QtCore
            return QtCore
        finally:
            sys.path = saved_path

    # no IDA: fall back to whichever binding is available
    try:
        from PyQt5 import QtCore
        return QtCore
    except ImportError:
        pass
    try:
        from PySide import QtCore
        return QtCore
    except ImportError:
        pass
    raise ImportError("No module named PySide or PyQt")
def get_meta_netnode():
    """Return the netnode that stores settings metadata in the current IDB.

    Note that this implicitly uses the open IDB via the idc interface.
    The node is named ``"$ <organization>.<application>"`` — presumably
    the "$ " prefix follows IDA's netnode naming convention; confirm
    against the netnode module if this is changed.
    """
    node_name = "$ %s.%s" % (IDA_SETTINGS_ORGANIZATION,
                             IDA_SETTINGS_APPLICATION)
    return netnode.Netnode(node_name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.