| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q10000
|
_get_usage
|
train
|
def _get_usage(function, *args):
"""Test if more memory is used after the function has been called.
The function will be invoked twice and only the second measurement will be
considered. Thus, memory used in initialisation (e.g. loading modules)
will not be included in the result. The goal is to identify memory leaks
caused by functions which use more and more memory.
Any arguments besides the function itself will be passed on to the function
on invocation.
Note that this function is currently experimental, because it is not
tested thoroughly and performs poorly.
"""
# The usage of a function is calculated by creating one summary of all
# objects before the function is invoked and afterwards. These summaries
# are compared and the diff is returned.
# This function works in a two-step process. Before the actual function is
# invoked, an empty dummy function is measured to identify the overhead
# involved in the measuring process. This overhead is then subtracted from
# the measurement performed on the passed function. The result reflects the
# actual usage of a function call.
# Also, each measurement is performed twice, to adjust for objects created
# during one-time initialisation, e.g. module loading.
res = None
def _get_summaries(function, *args):
"""Get a 2-tuple containing one summary from before, and one summary
from after the function has been invoked.
"""
s_before = summary.summarize(get_objects())
function(*args)
s_after = summary.summarize(get_objects())
return (s_before, s_after)
def _get_usage(function, *args):
"""Get the usage of a function call.
This function is to be used only internally. The 'real' get_usage
function is a wrapper around _get_usage, but the workload is done
here.
"""
res = []
# init before calling
(s_before, s_after) = _get_summaries(function, *args)
# ignore all objects used for the measurement
ignore = []
if s_before != s_after:
ignore.append(s_before)
for row in s_before:
# ignore refs from summary and frame (loop)
if len(gc.get_referrers(row)) == 2:
ignore.append(row)
for item in row:
# ignore refs from summary and frame (loop)
if len(gc.get_referrers(item)) == 2:
ignore.append(item)
for o in ignore:
s_after = summary._subtract(s_after, o)
res = summary.get_diff(s_before, s_after)
return summary._sweep(res)
# calibrate; twice for initialization
def noop(): pass
offset = _get_usage(noop)
offset = _get_usage(noop)
# perform operation twice to handle objects possibly used in
# initialisation
tmp = _get_usage(function, *args)
tmp = _get_usage(function, *args)
tmp = summary.get_diff(offset, tmp)
tmp = summary._sweep(tmp)
if len(tmp) != 0:
res = tmp
return res
|
python
|
{
"resource": ""
}
|
q10001
|
_remove_duplicates
|
train
|
def _remove_duplicates(objects):
"""Remove duplicate objects.
Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = {}
result = []
for item in objects:
marker = id(item)
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result
|
python
|
{
"resource": ""
}
|
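A minimal usage sketch for _remove_duplicates above (hypothetical values): it preserves insertion order and deduplicates by identity (id()), not by equality, so equal but distinct objects are all kept.

a = "spam"
objects = [a, "eggs", a, [1, 2], [1, 2]]  # the two lists are equal but distinct objects
unique = _remove_duplicates(objects)
# -> ['spam', 'eggs', [1, 2], [1, 2]]; only the repeated reference to a is dropped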
q10002
|
summarize
|
train
|
def summarize(objects):
"""Summarize an objects list.
Return a list of lists, where each row consists of::
[str(type), number of objects of this type, total size of these objects].
No guarantee regarding the order is given.
"""
count = {}
total_size = {}
for o in objects:
otype = _repr(o)
if otype in count:
count[otype] += 1
total_size[otype] += _getsizeof(o)
else:
count[otype] = 1
total_size[otype] = _getsizeof(o)
rows = []
for otype in count:
rows.append([otype, count[otype], total_size[otype]])
return rows
|
python
|
{
"resource": ""
}
|
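A usage sketch for summarize, assuming it is importable as part of pympler's summary module alongside muppy (the sample shows only the function body, so the import path is an assumption):

from pympler import muppy, summary  # assumed packaging

all_objects = muppy.get_objects()
rows = summary.summarize(all_objects)
# each row is [str(type), number of objects, total size in bytes]
summary.print_(rows, limit=5)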
q10003
|
get_diff
|
train
|
def get_diff(left, right):
"""Get the difference of two summaries.
Subtracts the values of the right summary from the values of the left
summary.
If similar rows appear on both sides, they are included in the summary with
0 for number of elements and total size.
If the number of elements of a row of the diff is 0, but the total size is
not, it means that objects likely have changed, but not their number, thus
resulting in a changed size.
"""
res = []
for row_r in right:
found = False
for row_l in left:
if row_r[0] == row_l[0]:
res.append([row_r[0], row_r[1] - row_l[1], row_r[2] - row_l[2]])
found = True
if not found:
res.append(row_r)
for row_l in left:
found = False
for row_r in right:
if row_l[0] == row_r[0]:
found = True
if not found:
res.append([row_l[0], -row_l[1], -row_l[2]])
return res
|
python
|
{
"resource": ""
}
|
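A sketch of diffing two summaries taken before and after an allocation, under the same pympler-style packaging assumption as above:

from pympler import muppy, summary  # assumed packaging

before = summary.summarize(muppy.get_objects())
hoard = [dict() for _ in range(1000)]  # allocate something measurable
after = summary.summarize(muppy.get_objects())
diff = summary.get_diff(before, after)
summary.print_(diff, limit=5)  # the dict row should grow by roughly 1000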
q10004
|
print_
|
train
|
def print_(rows, limit=15, sort='size', order='descending'):
"""Print the rows as a summary.
Keyword arguments:
limit -- the maximum number of elements to be listed
sort -- sort elements by 'size', 'type', or '#'
order -- sort 'ascending' or 'descending'
"""
localrows = []
for row in rows:
localrows.append(list(row))
# input validation
sortby = ['type', '#', 'size']
if sort not in sortby:
raise ValueError("invalid sort, should be one of" + str(sortby))
orders = ['ascending', 'descending']
if order not in orders:
raise ValueError("invalid order, should be one of" + str(orders))
# sort rows
if sortby.index(sort) == 0:
if order == "ascending":
localrows.sort(key=lambda x: _repr(x[0]))
elif order == "descending":
localrows.sort(key=lambda x: _repr(x[0]), reverse=True)
else:
if order == "ascending":
localrows.sort(key=lambda x: x[sortby.index(sort)])
elif order == "descending":
localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True)
# limit rows
localrows = localrows[0:limit]
for row in localrows:
row[2] = stringutils.pp(row[2])
# print rows
localrows.insert(0,["types", "# objects", "total size"])
_print_table(localrows)
|
python
|
{
"resource": ""
}
|
q10005
|
_print_table
|
train
|
def _print_table(rows, header=True):
"""Print a list of lists as a pretty table.
Keyword arguments:
header -- if True the first row is treated as a table header
inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662
"""
border = "="
# vertical delimiter
vdelim = " | "
# padding nr. of spaces are left around the longest element in the
# column
padding = 1
# may be left,center,right
justify = 'right'
justify = {'left' : str.ljust,
'center' : str.center,
'right' : str.rjust}[justify.lower()]
# calculate column widths (longest item in each col
# plus "padding" nr of spaces on both sides)
cols = zip(*rows)
colWidths = [max([len(str(item))+2*padding for item in col]) for col in cols]
borderline = vdelim.join([w*border for w in colWidths])
for row in rows:
print(vdelim.join([justify(str(item),width) for (item,width) in zip(row,colWidths)]))
if header:
print(borderline)
header=False
|
python
|
{
"resource": ""
}
|
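A sketch of what _print_table produces for a header plus two data rows (the byte sizes are hypothetical, pre-formatted strings):

rows = [["types", "# objects", "total size"],
        ["str", 12000, "1.1 MB"],
        ["dict", 3400, "2.3 MB"]]
_print_table(rows)
# prints right-justified columns joined by ' | ', with a '=' border line
# printed directly under the header row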
q10006
|
_repr
|
train
|
def _repr(o, verbosity=1):
"""Get meaning object representation.
This function should be used when the simple str(o) output would result in
too general data. E.g. "<type 'instance'" is less meaningful than
"instance: Foo".
Keyword arguments:
verbosity -- if True the first row is treated as a table header
"""
res = ""
t = type(o)
if (verbosity == 0) or (t not in representations):
res = str(t)
else:
verbosity -= 1
if len(representations[t]) < verbosity:
verbosity = len(representations[t]) - 1
res = representations[t][verbosity](o)
res = address.sub('', res)
res = type_prefix.sub('', res)
res = type_suffix.sub('', res)
return res
|
python
|
{
"resource": ""
}
|
q10007
|
_traverse
|
train
|
def _traverse(summary, function, *args):
"""Traverse all objects of a summary and call function with each as a
parameter.
Using this function, the following objects will be traversed:
- the summary
- each row
- each item of a row
"""
function(summary, *args)
for row in summary:
function(row, *args)
for item in row:
function(item, *args)
|
python
|
{
"resource": ""
}
|
q10008
|
_subtract
|
train
|
def _subtract(summary, o):
"""Remove object o from the summary by subtracting it's size."""
found = False
row = [_repr(o), 1, _getsizeof(o)]
for r in summary:
if r[0] == row[0]:
(r[1], r[2]) = (r[1] - row[1], r[2] - row[2])
found = True
if not found:
summary.append([row[0], -row[1], -row[2]])
return summary
|
python
|
{
"resource": ""
}
|
q10009
|
Polynomial.get_degree
|
train
|
def get_degree(self, poly=None):
'''Returns the degree of the polynomial'''
if not poly:
return self.degree
#return len(self.coefficients) - 1
elif poly and hasattr("coefficients", poly):
return len(poly.coefficients) - 1
else:
while poly and poly[-1] == 0:
poly.pop() # normalize
return len(poly)-1
|
python
|
{
"resource": ""
}
|
q10010
|
Polynomial.scale
|
train
|
def scale(self, scalar):
'''Multiply a polynomial with a scalar'''
return self.__class__([self.coefficients[i] * scalar for i in _range(len(self))])
|
python
|
{
"resource": ""
}
|
q10011
|
LineProfiler.wrap_function
|
train
|
def wrap_function(self, func):
""" Wrap a function to profile it.
"""
def f(*args, **kwds):
self.enable_by_count()
try:
result = func(*args, **kwds)
finally:
self.disable_by_count()
return result
return f
|
python
|
{
"resource": ""
}
|
q10012
|
LineProfiler.runctx
|
train
|
def runctx(self, cmd, globals, locals):
""" Profile a single executable statement in the given namespaces.
"""
self.enable_by_count()
try:
exec(cmd, globals, locals)
finally:
self.disable_by_count()
return self
|
python
|
{
"resource": ""
}
|
q10013
|
LineProfiler.runcall
|
train
|
def runcall(self, func, *args, **kw):
""" Profile a single function call.
"""
# XXX where is this used ? can be removed ?
self.enable_by_count()
try:
return func(*args, **kw)
finally:
self.disable_by_count()
|
python
|
{
"resource": ""
}
|
q10014
|
LineProfiler.disable_by_count
|
train
|
def disable_by_count(self):
""" Disable the profiler if the number of disable requests matches the
number of enable requests.
"""
if self.enable_count > 0:
self.enable_count -= 1
if self.enable_count == 0:
self.disable()
|
python
|
{
"resource": ""
}
|
q10015
|
parse_makefile_aliases
|
train
|
def parse_makefile_aliases(filepath):
'''
Parse a makefile to find commands and substitute variables. Expects a
makefile with only aliases and a line return between each command.
Returns a dict, with a list of commands for each alias.
'''
# -- Parsing the Makefile using ConfigParser
# Adding a fake section to make the Makefile a valid Ini file
ini_str = '[root]\n'
with open(filepath, 'r') as fd:
ini_str = ini_str + fd.read().replace('@make ', '')
ini_fp = StringIO.StringIO(ini_str)
# Parse using ConfigParser
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
# Fetch the list of aliases
aliases = config.options('root')
# -- Extracting commands for each alias
commands = {}
for alias in aliases:
# strip the first line return, and then split by any line return
commands[alias] = config.get('root', alias).lstrip('\n').split('\n')
# -- Commands substitution
# Loop until all aliases are substituted by their commands:
# Check each command of each alias, and if there is one command that is to
# be substituted by an alias, try to do it right away. If this is not
# possible because this alias itself points to other aliases , then stop
# and put the current alias back in the queue to be processed again later.
# Create the queue of aliases to process
aliases_todo = commands.keys()
# Create the dict that will hold the full commands
commands_new = {}
# Loop until we have processed all aliases
while aliases_todo:
# Pick the first alias in the queue
alias = aliases_todo.pop(0)
# Create a new entry in the resulting dict
commands_new[alias] = []
# For each command of this alias
for cmd in commands[alias]:
# Ignore self-referencing (alias points to itself)
if cmd == alias:
pass
# Substitute full command
elif cmd in aliases and cmd in commands_new:
# Append all the commands referenced by the alias
commands_new[alias].extend(commands_new[cmd])
# Delay substituting another alias, waiting for the other alias to
# be substituted first
elif cmd in aliases and cmd not in commands_new:
# Delete the current entry to avoid other aliases
# to reference this one wrongly (as it is empty)
del commands_new[alias]
aliases_todo.append(alias)
break
# Full command (no aliases)
else:
commands_new[alias].append(cmd)
commands = commands_new
del commands_new
# -- Prepending prefix to avoid conflicts with standard setup.py commands
# for alias in commands.keys():
# commands['make_'+alias] = commands[alias]
# del commands[alias]
return commands
|
python
|
{
"resource": ""
}
|
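A usage sketch for parse_makefile_aliases with a hypothetical two-alias Makefile, assuming the Python 2 module names (StringIO, ConfigParser) that the sample imports are available:

# contents of the hypothetical Makefile:
#   test:
#   	python -m pytest
#   all:
#   	@make test
aliases = parse_makefile_aliases('Makefile')
# -> {'test': ['python -m pytest'], 'all': ['python -m pytest']}
# the '@make ' prefix is stripped and the 'test' alias is substituted inline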
q10016
|
KThread.start
|
train
|
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
|
python
|
{
"resource": ""
}
|
q10017
|
KThread.__run
|
train
|
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
|
python
|
{
"resource": ""
}
|
q10018
|
MProfiler.codepoint_included
|
train
|
def codepoint_included(self, codepoint):
"""Check if codepoint matches any of the defined codepoints."""
if self.codepoints == None:
return True
for cp in self.codepoints:
mismatch = False
for i in range(len(cp)):
if (cp[i] is not None) and (cp[i] != codepoint[i]):
mismatch = True
break
if not mismatch:
return True
return False
|
python
|
{
"resource": ""
}
|
q10019
|
MProfiler.profile
|
train
|
def profile(self, frame, event, arg): #PYCHOK arg requ. to match signature
"""Profiling method used to profile matching codepoints and events."""
if (self.events == None) or (event in self.events):
frame_info = inspect.getframeinfo(frame)
cp = (frame_info[0], frame_info[2], frame_info[1])
if self.codepoint_included(cp):
objects = muppy.get_objects()
size = muppy.get_size(objects)
if cp not in self.memories:
self.memories[cp] = [0,0,0,0]
self.memories[cp][0] = 1
self.memories[cp][1] = size
self.memories[cp][2] = size
else:
self.memories[cp][0] += 1
if self.memories[cp][1] > size:
self.memories[cp][1] = size
if self.memories[cp][2] < size:
self.memories[cp][2] = size
|
python
|
{
"resource": ""
}
|
q10020
|
runprofile
|
train
|
def runprofile(mainfunction, output, timeout = 0, calibrate=False):
'''
Run the functions profiler and save the result
If timeout is greater than 0, the profile will automatically stops after timeout seconds
'''
if noprofiler == True:
print('ERROR: profiler and/or pstats library missing ! Please install it (probably package named python-profile) before running a profiling !')
return False
# This is the main function for profiling
def _profile():
profile.run(mainfunction, output)
print('=> RUNNING FUNCTIONS PROFILER\n\n'); sys.stdout.flush();
# Calibrate the profiler (only use this if the profiler produces some funny stuff, but calibration can also produce even more funny stuff with the latest cProfile of Python v2.7! So you should only enable calibration if necessary)
if calibrate:
print('Calibrating the profiler...'); sys.stdout.flush();
cval = calibrateprofile()
print('Calibration found value : %s' % cval); sys.stdout.flush();
print('Initializing the profiler...'); sys.stdout.flush();
# Run in timeout mode (if the function cannot ends by itself, this is the best mode: the function must ends for the profile to be saved)
if timeout > 0:
pthread = KThread(target=_profile) # we open the function with the profiler, in a special killable thread (see below why)
print('Will now run the profiling and terminate it in %s seconds. Results will be saved in %s' % (str(timeout), str(output))); sys.stdout.flush();
print('\nCountdown:'); sys.stdout.flush();
for i in range(0,5):
print(str(5-i))
sys.stdout.flush()
time.sleep(1)
print('0\nStarting to profile...'); sys.stdout.flush();
pthread.start() # starting the thread
time.sleep(float(timeout)) # after this amount of seconds, the thread gets killed and the profiler will end its job
print('\n\nFinishing the profile and saving to the file %s' % str(output)); sys.stdout.flush();
pthread.kill() # we must end the main function in order for the profiler to output its results (if we didn't launch a thread and just closed the process, it would have done no result)
# Run in full length mode (we run the function until it ends)
else:
print("Running the profiler, please wait until the process terminates by itself (if you forcequit before, the profile won't be saved)")
_profile()
print('=> Functions Profile done !')
return True
|
python
|
{
"resource": ""
}
|
q10021
|
parseprofile
|
train
|
def parseprofile(profilelog, out):
'''
Parse a profile log and print the result on screen
'''
file = open(out, 'w') # opening the output file
print('Opening the profile in %s...' % profilelog)
p = pstats.Stats(profilelog, stream=file) # parsing the profile with pstats, and output everything to the file
print('Generating the stats, please wait...')
file.write("=== All stats:\n")
p.strip_dirs().sort_stats(-1).print_stats()
file.write("=== Cumulative time:\n")
p.sort_stats('cumulative').print_stats(100)
file.write("=== Time:\n")
p.sort_stats('time').print_stats(100)
file.write("=== Time + cumulative time:\n")
p.sort_stats('time', 'cum').print_stats(.5, 'init')
file.write("=== Callees:\n")
p.print_callees()
file.write("=== Callers:\n")
p.print_callers()
#p.print_callers(.5, 'init')
#p.add('fooprof')
file.close()
print('Stats generated and saved to %s.' % out)
print('Everything is done. Exiting')
|
python
|
{
"resource": ""
}
|
q10022
|
browseprofile
|
train
|
def browseprofile(profilelog):
'''
Browse interactively a profile log in console
'''
print('Starting the pstats profile browser...\n')
try:
browser = ProfileBrowser(profilelog)
print >> browser.stream, "Welcome to the profile statistics browser. Type help to get started."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass
|
python
|
{
"resource": ""
}
|
q10023
|
browseprofilegui
|
train
|
def browseprofilegui(profilelog):
'''
Browse interactively a profile log in GUI using RunSnakeRun and SquareMap
'''
from runsnakerun import runsnake # runsnakerun needs wxPython lib, if it's not available then we can pass if we don't want a GUI. RunSnakeRun is only used for GUI visualisation, not for profiling (and you can still use pstats for console browsing)
app = runsnake.RunSnakeRunApp(0)
app.OnInit(profilelog)
#app.OnInit()
app.MainLoop()
|
python
|
{
"resource": ""
}
|
q10024
|
GF2int._to_binpoly
|
train
|
def _to_binpoly(x):
'''Convert a Galois Field's number into a nice polynomial'''
if x <= 0: return "0"
b = 1 # init to 2^0 = 1
c = [] # stores the degrees of each term of the polynomials
i = 0 # counter for b = 2^i
while x > 0:
b = (1 << i) # generate a number power of 2: 2^0, 2^1, 2^2, ..., 2^i. Equivalent to b = 2^i
if x & b : # then check if x is divisible by the power of 2. Equivalent to x % 2^i == 0
# If yes, then...
c.append(i) # append this power (i, the exponent, gives us the coefficient)
x ^= b # and compute the remainder of x / b
i = i+1 # increment to compute the next power of 2
return " + ".join(["x^%i" % y for y in c[::-1]])
|
python
|
{
"resource": ""
}
|
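A worked example for _to_binpoly: 157 is 0b10011101, so bits 7, 4, 3, 2 and 0 are set (calling it through GF2int assumes it is exposed as a static method, per the sample title):

print(GF2int._to_binpoly(157))
# -> x^7 + x^4 + x^3 + x^2 + x^0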
q10025
|
loads
|
train
|
def loads( source ):
"""Load json structure from meliae from source
Supports only the required structures to support loading meliae memory dumps
"""
source = source.strip()
assert source.startswith( '{' )
assert source.endswith( '}' )
source = source[1:-1]
result = {}
for match in attr.finditer( source ):
key = match.group('key')
if match.group( 'list' ) is not None:
value = [
int(x)
for x in match.group( 'list' ).strip().replace(',',' ').split()
]
elif match.group( 'int' ) is not None:
value = int( match.group( 'int' ))
elif match.group( 'string' ) is not None:
def deescape( match ):
return unichr( int( match.group(0)[2:], 16 ))
value = match.group('string').decode( 'utf-8' )
value = escape.sub(
deescape,
value,
)
value = simple_escape.sub(
lambda x: x.group(1),
value,
)
else:
raise RuntimeError( "Matched something we don't know how to process:", match.groupdict() )
result[key] = value
return result
|
python
|
{
"resource": ""
}
|
q10026
|
RSCoder._list_lstrip
|
train
|
def _list_lstrip(self, L, val=0):
'''Left strip the specified value'''
for i in _range(len(L)):
if L[i] != val:
return L[i:]
|
python
|
{
"resource": ""
}
|
q10027
|
RSCoder._chien_search_fast
|
train
|
def _chien_search_fast(self, sigma):
'''Real chien search, we reuse the previous polynomial evaluation and just multiply by a constant polynomial. This should be faster, but it seems it's just the same speed as the other bruteforce version. However, it should easily be parallelizable.'''
# TODO: doesn't work when fcr is different than 1 (X values are incorrectly "shifted"...)
# TODO: try to mix this approach with the optimized walk on only interesting values, implemented in _chien_search_faster()
X = []
j = []
p = GF2int(self.generator)
if not hasattr(self, 'const_poly'): self.const_poly = [GF2int(self.generator)**(i+self.fcr) for i in _range(self.gf2_charac, -1, -1)] # constant polynomial that will allow us to update the previous polynomial evaluation to get the next one
const_poly = self.const_poly # caching for more efficiency since it never changes
ev_poly, ev = sigma.evaluate_array( p**1 ) # compute the first polynomial evaluation
# Try for each possible location
for l in _range(1, self.gf2_charac+1): # range 1:256 is important: if you use range 0:255, if the last byte of the ecc symbols is corrupted, it won't be correctable! You need to use the range 1,256 to include this last byte.
#l = (i+self.fcr)
# Check if it's a root for the polynomial
if ev == 0: # If it's 0, then bingo! It's an error location
# Compute the error location polynomial X (will be directly used to compute the errors magnitudes inside the Forney algorithm)
X.append( p**(-l) )
# Compute the coefficient position (not the error position, it's actually the reverse: we compute the degree of the term where the error is located. To get the error position, just compute n-1-j).
# This is different than the notes, I think the notes were in error
# Notes said j values were just l, when it's actually 255-l
j.append(self.gf2_charac - l)
# Update the polynomial evaluation for the next iteration
# we simply multiply each term[k] with alpha^k (where here alpha = p = GF2int(generator)).
# For more info, see the presentation by Andrew Brown, or this one: http://web.ntpu.edu.tw/~yshan/BCH_decoding.pdf
# TODO: parallelize this loop
for i in _range(1, len(ev_poly)+1): # TODO: maybe the fcr != 1 fix should be put here?
ev_poly[-i] *= const_poly[-i]
# Compute the new evaluation by just summing
ev = sum(ev_poly)
return X, j
|
python
|
{
"resource": ""
}
|
q10028
|
_ProcessMemoryInfoPS.update
|
train
|
def update(self):
"""
Get virtual and resident size of current process via 'ps'.
This should work for MacOS X, Solaris, Linux. Returns true if it was
successful.
"""
try:
p = Popen(['/bin/ps', '-p%s' % self.pid, '-o', 'rss,vsz'],
stdout=PIPE, stderr=PIPE)
except OSError: # pragma: no cover
pass
else:
s = p.communicate()[0].split()
if p.returncode == 0 and len(s) >= 2: # pragma: no branch
self.vsz = int(s[-1]) * 1024
self.rss = int(s[-2]) * 1024
return True
return False
|
python
|
{
"resource": ""
}
|
q10029
|
_ProcessMemoryInfoProc.update
|
train
|
def update(self):
"""
Get virtual size of current process by reading the process' stat file.
This should work for Linux.
"""
try:
stat = open('/proc/self/stat')
status = open('/proc/self/status')
except IOError: # pragma: no cover
return False
else:
stats = stat.read().split()
self.vsz = int( stats[22] )
self.rss = int( stats[23] ) * self.pagesize
self.pagefaults = int( stats[11] )
for entry in status.readlines():
key, value = entry.split(':')
size_in_bytes = lambda x: int(x.split()[0]) * 1024
if key == 'VmData':
self.data_segment = size_in_bytes(value)
elif key == 'VmExe':
self.code_segment = size_in_bytes(value)
elif key == 'VmLib':
self.shared_segment = size_in_bytes(value)
elif key == 'VmStk':
self.stack_segment = size_in_bytes(value)
key = self.key_map.get(key)
if key:
self.os_specific.append((key, value.strip()))
stat.close()
status.close()
return True
|
python
|
{
"resource": ""
}
|
q10030
|
DataView.SetColumns
|
train
|
def SetColumns( self, columns, sortOrder=None ):
"""Set columns to a set of values other than the originals and recreates column controls"""
self.columns = columns
self.sortOrder = [(x.defaultOrder,x) for x in self.columns if x.sortDefault]
self.CreateColumns()
|
python
|
{
"resource": ""
}
|
q10031
|
DataView.OnNodeActivated
|
train
|
def OnNodeActivated(self, event):
"""We have double-clicked for hit enter on a node refocus squaremap to this node"""
try:
node = self.sorted[event.GetIndex()]
except IndexError, err:
log.warn(_('Invalid index in node activated: %(index)s'),
index=event.GetIndex())
else:
wx.PostEvent(
self,
squaremap.SquareActivationEvent(node=node, point=None,
map=None)
)
|
python
|
{
"resource": ""
}
|
q10032
|
DataView.OnNodeSelected
|
train
|
def OnNodeSelected(self, event):
"""We have selected a node with the list control, tell the world"""
try:
node = self.sorted[event.GetIndex()]
except IndexError, err:
log.warn(_('Invalid index in node selected: %(index)s'),
index=event.GetIndex())
else:
if node is not self.selected_node:
wx.PostEvent(
self,
squaremap.SquareSelectionEvent(node=node, point=None,
map=None)
)
|
python
|
{
"resource": ""
}
|
q10033
|
DataView.SetIndicated
|
train
|
def SetIndicated(self, node):
"""Set this node to indicated status"""
self.indicated_node = node
self.indicated = self.NodeToIndex(node)
self.Refresh(False)
return self.indicated
|
python
|
{
"resource": ""
}
|
q10034
|
DataView.SetSelected
|
train
|
def SetSelected(self, node):
"""Set our selected node"""
self.selected_node = node
index = self.NodeToIndex(node)
if index != -1:
self.Focus(index)
self.Select(index, True)
return index
|
python
|
{
"resource": ""
}
|
q10035
|
DataView.OnReorder
|
train
|
def OnReorder(self, event):
"""Given a request to reorder, tell us to reorder"""
column = self.columns[event.GetColumn()]
return self.ReorderByColumn( column )
|
python
|
{
"resource": ""
}
|
q10036
|
DataView.ReorderByColumn
|
train
|
def ReorderByColumn( self, column ):
"""Reorder the set of records by column"""
# TODO: store current selection and re-select after sorting...
single_column = self.SetNewOrder( column )
self.reorder( single_column = True )
self.Refresh()
|
python
|
{
"resource": ""
}
|
q10037
|
DataView.reorder
|
train
|
def reorder(self, single_column=False):
"""Force a reorder of the displayed items"""
if single_column:
columns = self.sortOrder[:1]
else:
columns = self.sortOrder
for ascending,column in columns[::-1]:
# Python 2.2+ guarantees stable sort, so sort by each column in reverse
# order will order by the assigned columns
self.sorted.sort( key=column.get, reverse=(not ascending))
|
python
|
{
"resource": ""
}
|
q10038
|
DataView.integrateRecords
|
train
|
def integrateRecords(self, functions):
"""Integrate records from the loader"""
self.SetItemCount(len(functions))
self.sorted = functions[:]
self.reorder()
self.Refresh()
|
python
|
{
"resource": ""
}
|
q10039
|
DataView.OnGetItemText
|
train
|
def OnGetItemText(self, item, col):
"""Retrieve text for the item and column respectively"""
# TODO: need to format for rjust and the like...
try:
column = self.columns[col]
value = column.get(self.sorted[item])
except IndexError, err:
return None
else:
if value is None:
return u''
if column.percentPossible and self.percentageView and self.total:
value = value / float(self.total) * 100.00
if column.format:
try:
return column.format % (value,)
except Exception, err:
log.warn('Column %s could not format %r value: %r',
column.name, type(value), value
)
value = column.get(self.sorted[item] )
if isinstance(value,(unicode,str)):
return value
return unicode(value)
else:
if isinstance(value,(unicode,str)):
return value
return unicode(value)
|
python
|
{
"resource": ""
}
|
q10040
|
encode
|
train
|
def encode(input, output_filename):
"""Encodes the input data with reed-solomon error correction in 223 byte
blocks, and outputs each block along with 32 parity bytes to a new file by
the given filename.
input is a file-like object
The output image will be in PNG format, 255 by X pixels with one color
channel, where X is the number of 255-byte blocks from the input. Each
block of data will be one row; therefore, the data can be recovered if no
more than 16 pixels per row are altered.
"""
coder = rs.RSCoder(255,223)
output = []
while True:
block = input.read(223)
if not block: break
code = coder.encode_fast(block)
output.append(code)
sys.stderr.write(".")
sys.stderr.write("\n")
out = Image.new("L", (rowstride,len(output)))
out.putdata("".join(output))
out.save(output_filename)
|
python
|
{
"resource": ""
}
|
q10041
|
redirect
|
train
|
def redirect(url, code=303):
""" Aborts execution and causes a 303 redirect """
scriptname = request.environ.get('SCRIPT_NAME', '').rstrip('/') + '/'
location = urljoin(request.url, urljoin(scriptname, url))
raise HTTPResponse("", status=code, header=dict(Location=location))
|
python
|
{
"resource": ""
}
|
q10042
|
parse_date
|
train
|
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError):
return None
|
python
|
{
"resource": ""
}
|
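A usage sketch for parse_date using the classic RFC 1123 example timestamp; unparsable input falls through to None:

parse_date('Sun, 06 Nov 1994 08:49:37 GMT')  # -> 784111777.0 (UTC epoch seconds)
parse_date('not a date')                     # -> None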
q10043
|
run
|
train
|
def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
interval=1, reloader=False, **kargs):
""" Runs bottle as a web server. """
app = app if app else default_app()
quiet = bool(kargs.get('quiet', False))
# Instantiate server, if it is a class instead of an instance
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of WSGIAdapter")
if not quiet and isinstance(server, ServerAdapter): # pragma: no cover
if not reloader or os.environ.get('BOTTLE_CHILD') == 'true':
print("Bottle server starting up (using %s)..." % repr(server))
print("Listening on http://%s:%d/" % (server.host, server.port))
print("Use Ctrl-C to quit.")
print()
else:
print("Bottle auto reloader starting up...")
try:
if reloader and interval:
reloader_run(server, app, interval)
else:
server.run(app)
except KeyboardInterrupt:
if not quiet: # pragma: no cover
print("Shutting Down...")
|
python
|
{
"resource": ""
}
|
q10044
|
template
|
train
|
def template(tpl, template_adapter=SimpleTemplate, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
'''
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.get('template_settings',{})
lookup = kwargs.get('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings: TEMPLATES[tpl].prepare(settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, settings=settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, settings=settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
kwargs['abort'] = abort
kwargs['request'] = request
kwargs['response'] = response
return TEMPLATES[tpl].render(**kwargs)
|
python
|
{
"resource": ""
}
|
q10045
|
Route.group_re
|
train
|
def group_re(self):
''' Return a regexp pattern with named groups '''
out = ''
for token, data in self.tokens():
if token == 'TXT': out += re.escape(data)
elif token == 'VAR': out += '(?P<%s>%s)' % (data[1], data[0])
elif token == 'ANON': out += '(?:%s)' % data
return out
|
python
|
{
"resource": ""
}
|
q10046
|
Route.format_str
|
train
|
def format_str(self):
''' Return a format string with named fields. '''
if self.static:
return self.route.replace('%','%%')
out, i = '', 0
for token, value in self.tokens():
if token == 'TXT': out += value.replace('%','%%')
elif token == 'ANON': out += '%%(anon%d)s' % i; i+=1
elif token == 'VAR': out += '%%(%s)s' % value[1]
return out
|
python
|
{
"resource": ""
}
|
q10047
|
Route.is_dynamic
|
train
|
def is_dynamic(self):
''' Return true if the route contains dynamic parts '''
if not self._static:
for token, value in self.tokens():
if token != 'TXT':
return True
self._static = True
return False
|
python
|
{
"resource": ""
}
|
q10048
|
Router.build
|
train
|
def build(self, route_name, **args):
''' Builds a URL out of a named route and some parameters.'''
try:
return self.named[route_name] % args
except KeyError:
raise RouteBuildError("No route found with name '%s'." % route_name)
|
python
|
{
"resource": ""
}
|
q10049
|
Bottle.add_filter
|
train
|
def add_filter(self, ftype, func):
''' Register a new output filter. Whenever bottle hits a handler output
matching `ftype`, `func` is applied to it. '''
if not isinstance(ftype, type):
raise TypeError("Expected type object, got %s" % type(ftype))
self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype]
self.castfilter.append((ftype, func))
self.castfilter.sort()
|
python
|
{
"resource": ""
}
|
q10050
|
Request.bind
|
train
|
def bind(self, environ, app=None):
""" Bind a new WSGI enviroment and clear out all previously computed
attributes.
This is done automatically for the global `bottle.request`
instance on every request.
"""
if isinstance(environ, Request): # Recycle already parsed content
for key in self.__dict__: #TODO: Test this
setattr(self, key, getattr(environ, key))
self.app = app
return
self._GET = self._POST = self._GETPOST = self._COOKIES = None
self._body = self._header = None
self.environ = environ
self.app = app
# These attributes are used anyway, so it is ok to compute them here
self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
self.method = environ.get('REQUEST_METHOD', 'GET').upper()
|
python
|
{
"resource": ""
}
|
q10051
|
Request.path_shift
|
train
|
def path_shift(self, count=1):
''' Shift some levels of PATH_INFO into SCRIPT_NAME and return the
moved part. count defaults to 1'''
#/a/b/ /c/d --> 'a','b' 'c','d'
if count == 0: return ''
pathlist = self.path.strip('/').split('/')
scriptlist = self.environ.get('SCRIPT_NAME','/').strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if count > 0 and count <= len(pathlist):
moved = pathlist[:count]
scriptlist = scriptlist + moved
pathlist = pathlist[count:]
elif count < 0 and count >= -len(scriptlist):
moved = scriptlist[count:]
pathlist = moved + pathlist
scriptlist = scriptlist[:count]
else:
empty = 'SCRIPT_NAME' if count < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
self['PATH_INFO'] = self.path = '/' + '/'.join(pathlist) \
+ ('/' if self.path.endswith('/') and pathlist else '')
self['SCRIPT_NAME'] = '/' + '/'.join(scriptlist)
return '/'.join(moved)
|
python
|
{
"resource": ""
}
|
q10052
|
Request.POST
|
train
|
def POST(self):
""" The HTTP POST body parsed into a MultiDict.
This supports urlencoded and multipart POST requests. Multipart
is commonly used for file uploads and may result in some of the
values being cgi.FieldStorage objects instead of strings.
Multiple values per key are possible. See MultiDict for details.
"""
if self._POST is None:
save_env = dict() # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ:
save_env[key] = self.environ[key]
save_env['QUERY_STRING'] = '' # Without this, sys.argv is called!
if TextIOWrapper:
fb = TextIOWrapper(self.body, encoding='ISO-8859-1')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=save_env)
self._POST = MultiDict()
for item in data.list:
self._POST[item.name] = item if item.filename else item.value
return self._POST
|
python
|
{
"resource": ""
}
|
q10053
|
Request.params
|
train
|
def params(self):
""" A combined MultiDict with POST and GET parameters. """
if self._GETPOST is None:
self._GETPOST = MultiDict(self.GET)
self._GETPOST.update(dict(self.POST))
return self._GETPOST
|
python
|
{
"resource": ""
}
|
q10054
|
Response.copy
|
train
|
def copy(self):
''' Returns a copy of self '''
copy = Response(self.app)
copy.status = self.status
copy.headers = self.headers.copy()
copy.content_type = self.content_type
return copy
|
python
|
{
"resource": ""
}
|
q10055
|
magic_mprun
|
train
|
def magic_mprun(self, parameter_s=''):
""" Execute a statement under the line-by-line memory profiler from the
memory_profiler module.
Usage:
%mprun -f func1 -f func2 <statement>
The given statement (which doesn't require quote marks) is run via the
LineProfiler. Profiling is enabled for the functions specified by the -f
options. The statistics will be shown side-by-side with the code through
the pager once the statement has completed.
Options:
-f <function>: LineProfiler only profiles functions and methods it is told
to profile. This option tells the profiler about these functions. Multiple
-f options may be used. The argument may be any expression that gives
a Python function or method object. However, one must be careful to avoid
spaces that may confuse the option parser. Additionally, functions defined
in the interpreter at the In[] prompt or via %run currently cannot be
displayed. Write these functions out to a separate file and import them.
One or more -f options are required to get any useful results.
-T <filename>: dump the text-formatted statistics with the code
side-by-side out to a text file.
-r: return the LineProfiler object after it has completed profiling.
-c: If present, add the memory usage of any children process to the report.
"""
try:
from StringIO import StringIO
except ImportError: # Python 3.x
from io import StringIO
# Local imports to avoid hard dependency.
from distutils.version import LooseVersion
import IPython
ipython_version = LooseVersion(IPython.__version__)
if ipython_version < '0.11':
from IPython.genutils import page
from IPython.ipstruct import Struct
from IPython.ipapi import UsageError
else:
from IPython.core.page import page
from IPython.utils.ipstruct import Struct
from IPython.core.error import UsageError
# Escape quote markers.
opts_def = Struct(T=[''], f=[])
parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
opts, arg_str = self.parse_options(parameter_s, 'rf:T:c', list_all=True)
opts.merge(opts_def)
global_ns = self.shell.user_global_ns
local_ns = self.shell.user_ns
# Get the requested functions.
funcs = []
for name in opts.f:
try:
funcs.append(eval(name, global_ns, local_ns))
except Exception as e:
raise UsageError('Could not find function %r.\n%s: %s' % (name,
e.__class__.__name__, e))
include_children = 'c' in opts
profile = LineProfiler(include_children=include_children)
for func in funcs:
profile(func)
# Add the profiler to the builtins for @profile.
try:
import builtins
except ImportError: # Python 2.x provides __builtin__ instead
import __builtin__ as builtins
if 'profile' in builtins.__dict__:
had_profile = True
old_profile = builtins.__dict__['profile']
else:
had_profile = False
old_profile = None
builtins.__dict__['profile'] = profile
try:
try:
profile.runctx(arg_str, global_ns, local_ns)
message = ''
except SystemExit:
message = "*** SystemExit exception caught in code being profiled."
except KeyboardInterrupt:
message = ("*** KeyboardInterrupt exception caught in code being "
"profiled.")
finally:
if had_profile:
builtins.__dict__['profile'] = old_profile
# Trap text output.
stdout_trap = StringIO()
show_results(profile, stdout_trap)
output = stdout_trap.getvalue()
output = output.rstrip()
if ipython_version < '0.11':
page(output, screen_lines=self.shell.rc.screen_length)
else:
page(output)
print(message,)
text_file = opts.T[0]
if text_file:
with open(text_file, 'w') as pfile:
pfile.write(output)
print('\n*** Profile printout saved to text file %s. %s' % (text_file,
message))
return_value = None
if 'r' in opts:
return_value = profile
return return_value
|
python
|
{
"resource": ""
}
|
q10056
|
profile
|
train
|
def profile(func, stream=None):
"""
Decorator that will run the function and print a line-by-line profile
"""
def wrapper(*args, **kwargs):
prof = LineProfiler()
val = prof(func)(*args, **kwargs)
show_results(prof, stream=stream)
return val
return wrapper
|
python
|
{
"resource": ""
}
|
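A usage sketch for the profile decorator, following memory_profiler's documented pattern (line-by-line memory usage is printed via show_results when the wrapped call returns):

@profile
def my_func():
    a = [1] * (10 ** 6)       # allocate roughly 8 MB
    b = [2] * (2 * 10 ** 7)   # allocate roughly 160 MB
    del b
    return a

my_func()  # stream=None, so results go to standard output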
q10057
|
TimeStamper.timestamp
|
train
|
def timestamp(self, name="<block>"):
"""Returns a context manager for timestamping a block of code."""
# Make a fake function
func = lambda x: x
func.__module__ = ""
func.__name__ = name
self.add_function(func)
timestamps = []
self.functions[func].append(timestamps)
# A new object is required each time, since there can be several
# nested context managers.
return _TimeStamperCM(timestamps)
|
python
|
{
"resource": ""
}
|
q10058
|
TimeStamper.wrap_function
|
train
|
def wrap_function(self, func):
""" Wrap a function to timestamp it.
"""
def f(*args, **kwds):
# Start time
timestamps = [_get_memory(os.getpid(), timestamps=True)]
self.functions[func].append(timestamps)
try:
result = func(*args, **kwds)
finally:
# end time
timestamps.append(_get_memory(os.getpid(), timestamps=True))
return result
return f
|
python
|
{
"resource": ""
}
|
q10059
|
LineProfiler.run
|
train
|
def run(self, cmd):
""" Profile a single executable statement in the main namespace.
"""
# TODO: can this be removed ?
import __main__
main_dict = __main__.__dict__
return self.runctx(cmd, main_dict, main_dict)
|
python
|
{
"resource": ""
}
|
q10060
|
PStatsLoader.get_root
|
train
|
def get_root( self, key ):
"""Retrieve a given declared root by root-type-key"""
if key not in self.roots:
function = getattr( self, 'load_%s'%(key,) )()
self.roots[key] = function
return self.roots[key]
|
python
|
{
"resource": ""
}
|
q10061
|
PStatsLoader.get_rows
|
train
|
def get_rows( self, key ):
"""Get the set of rows for the type-key"""
if key not in self.roots:
self.get_root( key )
if key == 'location':
return self.location_rows
else:
return self.rows
|
python
|
{
"resource": ""
}
|
q10062
|
PStatsLoader.load
|
train
|
def load( self, stats ):
"""Build a squaremap-compatible model from a pstats class"""
rows = self.rows
for func, raw in stats.iteritems():
try:
rows[func] = row = PStatRow( func,raw )
except ValueError, err:
log.info( 'Null row: %s', func )
for row in rows.itervalues():
row.weave( rows )
return self.find_root( rows )
|
python
|
{
"resource": ""
}
|
q10063
|
PStatsLoader._load_location
|
train
|
def _load_location( self ):
"""Build a squaremap-compatible model for location-based hierarchy"""
directories = {}
files = {}
root = PStatLocation( '/', 'PYTHONPATH' )
self.location_rows = self.rows.copy()
for child in self.rows.values():
current = directories.get( child.directory )
directory, filename = child.directory, child.filename
if current is None:
if directory == '':
current = root
else:
current = PStatLocation( directory, '' )
self.location_rows[ current.key ] = current
directories[ directory ] = current
if filename == '~':
filename = '<built-in>'
file_current = files.get( (directory,filename) )
if file_current is None:
file_current = PStatLocation( directory, filename )
self.location_rows[ file_current.key ] = file_current
files[ (directory,filename) ] = file_current
current.children.append( file_current )
file_current.children.append( child )
# now link the directories...
for key,value in directories.items():
if value is root:
continue
found = False
while key:
new_key,rest = os.path.split( key )
if new_key == key:
break
key = new_key
parent = directories.get( key )
if parent:
if value is not parent:
parent.children.append( value )
found = True
break
if not found:
root.children.append( value )
# lastly, finalize all of the directory records...
root.finalize()
return root
|
python
|
{
"resource": ""
}
|
q10064
|
PStatLocation.filter_children
|
train
|
def filter_children( self ):
"""Filter our children into regular and local children sets"""
real_children = []
for child in self.children:
if child.name == '<module>':
self.local_children.append( child )
else:
real_children.append( child )
self.children = real_children
|
python
|
{
"resource": ""
}
|
q10065
|
split_box
|
train
|
def split_box( fraction, x,y, w,h ):
"""Return set of two boxes where first is the fraction given"""
if w >= h:
new_w = int(w*fraction)
if new_w:
return (x,y,new_w,h),(x+new_w,y,w-new_w,h)
else:
return None,None
else:
new_h = int(h*fraction)
if new_h:
return (x,y,w,new_h),(x,y+new_h,w,h-new_h)
else:
return None,None
|
python
|
{
"resource": ""
}
|
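Worked examples for split_box: the longer dimension is the one split, and a fraction too small to yield at least one pixel returns (None, None):

split_box(0.25, 0, 0, 100, 50)
# -> ((0, 0, 25, 50), (25, 0, 75, 50))
split_box(0.001, 0, 0, 100, 50)
# -> (None, None), since int(100 * 0.001) == 0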
q10066
|
HotMapNavigator.findNode
|
train
|
def findNode(class_, hot_map, targetNode, parentNode=None):
''' Find the target node in the hot_map. '''
for index, (rect, node, children) in enumerate(hot_map):
if node == targetNode:
return parentNode, hot_map, index
result = class_.findNode(children, targetNode, node)
if result:
return result
return None
|
python
|
{
"resource": ""
}
|
q10067
|
HotMapNavigator.firstChild
|
train
|
def firstChild(hot_map, index):
''' Return the first child of the node indicated by index. '''
children = hot_map[index][2]
if children:
return children[0][1]
else:
return hot_map[index][1]
|
python
|
{
"resource": ""
}
|
q10068
|
HotMapNavigator.nextChild
|
train
|
def nextChild(hotmap, index):
''' Return the next sibling of the node indicated by index. '''
nextChildIndex = min(index + 1, len(hotmap) - 1)
return hotmap[nextChildIndex][1]
|
python
|
{
"resource": ""
}
|
q10069
|
SquareMap.OnMouse
|
train
|
def OnMouse( self, event ):
"""Handle mouse-move event by selecting a given element"""
node = HotMapNavigator.findNodeAtPosition(self.hot_map, event.GetPosition())
self.SetHighlight( node, event.GetPosition() )
|
python
|
{
"resource": ""
}
|
q10070
|
SquareMap.OnClickRelease
|
train
|
def OnClickRelease( self, event ):
"""Release over a given square in the map"""
node = HotMapNavigator.findNodeAtPosition(self.hot_map, event.GetPosition())
self.SetSelected( node, event.GetPosition() )
|
python
|
{
"resource": ""
}
|
q10071
|
SquareMap.OnDoubleClick
|
train
|
def OnDoubleClick(self, event):
"""Double click on a given square in the map"""
node = HotMapNavigator.findNodeAtPosition(self.hot_map, event.GetPosition())
if node:
wx.PostEvent( self, SquareActivationEvent( node=node, point=event.GetPosition(), map=self ) )
|
python
|
{
"resource": ""
}
|
q10072
|
SquareMap.SetSelected
|
train
|
def SetSelected( self, node, point=None, propagate=True ):
"""Set the given node selected in the square-map"""
if node == self.selectedNode:
return
self.selectedNode = node
self.UpdateDrawing()
if node:
wx.PostEvent( self, SquareSelectionEvent( node=node, point=point, map=self ) )
|
python
|
{
"resource": ""
}
|
q10073
|
SquareMap.SetHighlight
|
train
|
def SetHighlight( self, node, point=None, propagate=True ):
"""Set the currently-highlighted node"""
if node == self.highlightedNode:
return
self.highlightedNode = node
# TODO: restrict refresh to the squares for previous node and new node...
self.UpdateDrawing()
if node and propagate:
wx.PostEvent( self, SquareHighlightEvent( node=node, point=point, map=self ) )
|
python
|
{
"resource": ""
}
|
q10074
|
SquareMap.Draw
|
train
|
def Draw(self, dc):
''' Draw the tree map on the device context. '''
self.hot_map = []
dc.BeginDrawing()
brush = wx.Brush( self.BackgroundColour )
dc.SetBackground( brush )
dc.Clear()
if self.model:
self.max_depth_seen = 0
font = self.FontForLabels(dc)
dc.SetFont(font)
self._em_size_ = dc.GetFullTextExtent( 'm', font )[0]
w, h = dc.GetSize()
self.DrawBox( dc, self.model, 0,0,w,h, hot_map = self.hot_map )
dc.EndDrawing()
|
python
|
{
"resource": ""
}
|
q10075
|
SquareMap.FontForLabels
|
train
|
def FontForLabels(self, dc):
''' Return the default GUI font, scaled for printing if necessary. '''
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
scale = dc.GetPPI()[0] / wx.ScreenDC().GetPPI()[0]
font.SetPointSize(scale*font.GetPointSize())
return font
|
python
|
{
"resource": ""
}
|
q10076
|
SquareMap.BrushForNode
|
train
|
def BrushForNode( self, node, depth=0 ):
"""Create brush to use to display the given node"""
if node == self.selectedNode:
color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT)
elif node == self.highlightedNode:
color = wx.Colour( red=0, green=255, blue=0 )
else:
color = self.adapter.background_color(node, depth)
if not color:
red = (depth * 10)%255
green = 255-((depth * 5)%255)
blue = (depth * 25)%255
color = wx.Colour( red, green, blue )
return wx.Brush( color )
|
python
|
{
"resource": ""
}
|
q10077
|
SquareMap.PenForNode
|
train
|
def PenForNode( self, node, depth=0 ):
"""Determine the pen to use to display the given node"""
if node == self.selectedNode:
return self.SELECTED_PEN
return self.DEFAULT_PEN
|
python
|
{
"resource": ""
}
|
q10078
|
SquareMap.TextForegroundForNode
|
train
|
def TextForegroundForNode(self, node, depth=0):
"""Determine the text foreground color to use to display the label of
the given node"""
if node == self.selectedNode:
fg_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)
else:
fg_color = self.adapter.foreground_color(node, depth)
if not fg_color:
fg_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT)
return fg_color
|
python
|
{
"resource": ""
}
|
q10079
|
SquareMap.DrawBox
|
train
|
def DrawBox( self, dc, node, x,y,w,h, hot_map, depth=0 ):
"""Draw a model-node's box and all children nodes"""
log.debug( 'Draw: %s to (%s,%s,%s,%s) depth %s',
node, x,y,w,h, depth,
)
if self.max_depth and depth > self.max_depth:
return
self.max_depth_seen = max( (self.max_depth_seen,depth))
dc.SetBrush( self.BrushForNode( node, depth ) )
dc.SetPen( self.PenForNode( node, depth ) )
# drawing offset by margin within the square...
dx,dy,dw,dh = x+self.margin,y+self.margin,w-(self.margin*2),h-(self.margin*2)
if sys.platform == 'darwin':
# Macs don't like drawing small rounded rects...
if w < self.padding*2 or h < self.padding*2:
dc.DrawRectangle( dx,dy,dw,dh )
else:
dc.DrawRoundedRectangle( dx,dy,dw,dh, self.padding )
else:
dc.DrawRoundedRectangle( dx,dy,dw,dh, self.padding*3 )
# self.DrawIconAndLabel(dc, node, x, y, w, h, depth)
children_hot_map = []
hot_map.append( (wx.Rect( int(x),int(y),int(w),int(h)), node, children_hot_map ) )
x += self.padding
y += self.padding
w -= self.padding*2
h -= self.padding*2
empty = self.adapter.empty( node )
icon_drawn = False
if self.max_depth and depth == self.max_depth:
self.DrawIconAndLabel(dc, node, x, y, w, h, depth)
icon_drawn = True
elif empty:
# is a fraction of the space which is empty...
log.debug( ' empty space fraction: %s', empty )
new_h = h * (1.0-empty)
self.DrawIconAndLabel(dc, node, x, y, w, h-new_h, depth)
icon_drawn = True
y += (h-new_h)
h = new_h
if w >self.padding*2 and h> self.padding*2:
children = self.adapter.children( node )
if children:
log.debug( ' children: %s', children )
self.LayoutChildren( dc, children, node, x,y,w,h, children_hot_map, depth+1 )
else:
log.debug( ' no children' )
if not icon_drawn:
self.DrawIconAndLabel(dc, node, x, y, w, h, depth)
else:
log.debug( ' not enough space: children skipped' )
|
python
|
{
"resource": ""
}
|
q10080
|
SquareMap.DrawIconAndLabel
|
train
|
def DrawIconAndLabel(self, dc, node, x, y, w, h, depth):
''' Draw the icon, if any, and the label, if any, of the node. '''
if w-2 < self._em_size_//2 or h-2 < self._em_size_ //2:
return
dc.SetClippingRegion(x+1, y+1, w-2, h-2) # Don't draw outside the box
try:
icon = self.adapter.icon(node, node==self.selectedNode)
if icon and h >= icon.GetHeight() and w >= icon.GetWidth():
iconWidth = icon.GetWidth() + 2
dc.DrawIcon(icon, x+2, y+2)
else:
iconWidth = 0
if self.labels and h >= dc.GetTextExtent('ABC')[1]:
dc.SetTextForeground(self.TextForegroundForNode(node, depth))
dc.DrawText(self.adapter.label(node), x + iconWidth + 2, y+2)
finally:
dc.DestroyClippingRegion()
|
python
|
{
"resource": ""
}
|
q10081
|
DefaultAdapter.overall
|
train
|
def overall( self, node ):
"""Calculate overall size of the node including children and empty space"""
return sum( [self.value(value,node) for value in self.children(node)] )
|
python
|
{
"resource": ""
}
|
q10082
|
DefaultAdapter.children_sum
|
train
|
def children_sum( self, children,node ):
"""Calculate children's total sum"""
return sum( [self.value(value,node) for value in children] )
|
python
|
{
"resource": ""
}
|
q10083
|
DefaultAdapter.empty
|
train
|
def empty( self, node ):
"""Calculate empty space as a fraction of total space"""
overall = self.overall( node )
if overall:
return (overall - self.children_sum( self.children(node), node))/float(overall)
return 0
|
python
|
{
"resource": ""
}
|
q10084
|
format_sizeof
|
train
|
def format_sizeof(num, suffix='bytes'):
'''Readable size format, courtesy of Sridhar Ratnakumar'''
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Y', suffix)
|
python
|
{
"resource": ""
}
|
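Worked examples for format_sizeof; note that it divides by 1000, not 1024:

format_sizeof(950)        # -> '950.0bytes'
format_sizeof(123456789)  # -> '123.5Mbytes'
format_sizeof(-2000000)   # -> '-2.0Mbytes'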
q10085
|
tqdm.close
|
train
|
def close(self):
"""
Call this method to force print the last progress bar update based on the latest n value
"""
if self.leave:
if self.last_print_n < self.n:
cur_t = time.time()
self.sp.print_status(format_meter(self.n, self.total, cur_t-self.start_t, self.ncols, self.prefix, self.unit, self.unit_format, self.ascii))
self.file.write('\n')
else:
self.sp.print_status('')
self.file.write('\r')
|
python
|
{
"resource": ""
}
|
q10086
|
tonativefunc
|
train
|
def tonativefunc(enc='utf-8'):
''' Returns a function that turns everything into 'native' strings using enc '''
if sys.version_info >= (3,0,0):
return lambda x: x.decode(enc) if isinstance(x, bytes) else str(x)
return lambda x: x.encode(enc) if isinstance(x, unicode) else str(x)
|
python
|
{
"resource": ""
}
|
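A usage sketch for tonativefunc: on Python 3 it decodes bytes, on Python 2 it encodes unicode, so either way the result is the interpreter's native str:

to_native = tonativefunc('utf-8')
to_native(b'caf\xc3\xa9')  # Python 3 -> 'café'
to_native(42)              # -> '42'; non-string values go through str()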
q10087
|
Bottle.mount
|
train
|
def mount(self, app, script_path):
''' Mount a Bottle application to a specific URL prefix '''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
script_path = '/'.join(filter(None, script_path.split('/')))
path_depth = script_path.count('/') + 1
if not script_path:
raise TypeError('Empty script_path. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(script_path):
raise TypeError('Conflict with existing mount: %s' % other)
@self.route('/%s/:#.*#' % script_path, method="ANY")
def mountpoint():
request.path_shift(path_depth)
return app.handle(request.path, request.method)
self.mounts[script_path] = app
|
python
|
{
"resource": ""
}
|
q10088
|
Request.GET
|
train
|
def GET(self):
""" The QUERY_STRING parsed into a MultiDict.
Keys and values are strings. Multiple values per key are possible.
See MultiDict for details.
"""
if self._GET is None:
data = parse_qs(self.query_string, keep_blank_values=True)
self._GET = MultiDict()
for key, values in data.iteritems():
for value in values:
self._GET[key] = value
return self._GET
|
python
|
{
"resource": ""
}
|
q10089
|
Request.COOKIES
|
train
|
def COOKIES(self):
""" Cookie information parsed into a dictionary.
Secure cookies are NOT decoded automatically. See
Request.get_cookie() for details.
"""
if self._COOKIES is None:
raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
self._COOKIES = {}
for cookie in raw_dict.itervalues():
self._COOKIES[cookie.key] = cookie.value
return self._COOKIES
|
python
|
{
"resource": ""
}
|
q10090
|
Response.set_cookie
|
train
|
def set_cookie(self, key, value, **kargs):
""" Add a new cookie with various options.
If the cookie value is not a string, a secure cookie is created.
Possible options are:
expires, path, comment, domain, max_age, secure, version, httponly
See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
"""
if not isinstance(value, basestring):
sec = self.app.config['securecookie.key']
value = cookie_encode(value, sec).decode('ascii') #2to3 hack
self.COOKIES[key] = value
for k, v in kargs.iteritems():
self.COOKIES[key][k.replace('_', '-')] = v
|
python
|
{
"resource": ""
}
|
q10091
|
tamper_file_at
|
train
|
def tamper_file_at(path, pos=0, replace_str=None):
""" Tamper a file at the given position and using the given string """
if not replace_str:
replace_str = "\x00"
try:
with open(path, "r+b") as fh:
if pos < 0: # if negative, we calculate the position backward from the end of file
fsize = os.fstat(fh.fileno()).st_size
pos = fsize + pos
fh.seek(pos)
fh.write(replace_str)
except IOError:
return False
finally:
try:
fh.close()
except Exception:
pass
return True
|
python
|
{
"resource": ""
}
|
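A usage sketch for tamper_file_at on a hypothetical file; a negative pos counts back from the end of the file, and the return value is True on success, False on IOError:

tamper_file_at('backup.img', pos=0)                     # null out the first byte
tamper_file_at('backup.img', pos=-1, replace_str='X')   # overwrite the last byte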
q10092
|
tamper_file
|
train
|
def tamper_file(filepath, mode='e', proba=0.03, block_proba=None, blocksize=65535, burst_length=None, header=None):
""" Randomly tamper a file's content """
if header and header > 0:
blocksize = header
tamper_count = 0 # total number of characters tampered in the file
total_size = 0 # total buffer size, NOT necessarily the total file size (depends if you set header or not)
with open(filepath, "r+b") as fh: # 'r+' allows to read AND overwrite characters. Else any other option won't allow both ('a+' read and append, 'w+' erases the file first then allow to read and write), and 'b' is just for binary because we can open any filetype.
if proba >= 1: proba = 1.0/os.fstat(fh.fileno()).st_size * proba # normalizing probability if it's an integer (ie: the number of characters to flip on average)
buf = fh.read(blocksize) # We process blocks by blocks because it's a lot faster (IO is still the slowest operation in any computing system)
while len(buf) > 0:
total_size += len(buf)
if not block_proba or (random.random() < block_proba): # If block tampering is enabled, process only if this block is selected by probability
pos2tamper = []
burst_remain = 0 # if burst is enabled and corruption probability is triggered, then we will here store the remaining number of characters to corrupt (the length is uniformly sampled over the range specified in arguments)
# Create the list of bits to tamper (it's a lot more efficient to precompute the list of characters to corrupt, and then modify in the file the characters all at once)
for i in xrange(len(buf)):
if burst_remain > 0 or (random.random() < proba): # Corruption probability: corrupt only if below the bit-flip proba
pos2tamper.append(i) # keep this character's position in the to-be-corrupted list
if burst_remain > 0: # if we're already in a burst, we minus one and continue onto the next character
burst_remain -= 1
elif burst_length: # else we're not in a burst, we create one (triggered by corruption probability: as soon as one character triggers the corruption probability, then we do a burst)
burst_remain = random.randint(burst_length[0], burst_length[1]) - 1 # if burst is enabled, then we randomly (uniformly) pick a random length for the burst between the range specified, and since we already tampered one character, we minus 1
# If there's any character to tamper in the list, we tamper the string
if pos2tamper:
tamper_count = tamper_count + len(pos2tamper)
#print("Before: %s" % buf)
buf = bytearray(buf) # Strings in Python are immutable, thus we need to convert to a bytearray
for pos in pos2tamper:
if mode == 'e' or mode == 'erasure': # Erase the character (set a null byte)
buf[pos] = 0
elif mode == 'n' or mode == 'noise': # Noising the character (set a random ASCII character)
buf[pos] = random.randint(0,255)
#print("After: %s" % buf)
# Overwriting the string into the file
prevpos = fh.tell() # need to store and place back the seek cursor because after the write, if it's the end of the file, the next read may be buggy (getting characters that are not part of the file)
fh.seek(fh.tell()-len(buf)) # Move the cursor at the beginning of the string we just read
fh.write(buf) # Overwrite it
fh.seek(prevpos) # Restore the previous position after the string
# If we only tamper the header, we stop here by setting the buffer to an empty string
if header and header > 0:
buf = ''
# Else we continue to the next data block
else:
# Load the next characters from file
buf = fh.read(blocksize)
return [tamper_count, total_size]
|
python
|
{
"resource": ""
}
|
q10093
|
tamper_dir
|
train
|
def tamper_dir(inputpath, *args, **kwargs):
""" Randomly tamper the files content in a directory tree, recursively """
silent = kwargs.get('silent', False)
if 'silent' in kwargs: del kwargs['silent']
filescount = 0
for _ in tqdm(recwalk(inputpath), desc='Precomputing', disable=silent):
filescount += 1
files_tampered = 0
tamper_count = 0
total_size = 0
for dirname, filepath in tqdm(recwalk(inputpath), total=filescount, leave=True, desc='Tamper file n.', disable=silent):
tcount, tsize = tamper_file(os.path.join(dirname, filepath), *args, **kwargs)
if tcount > 0:
tamper_count += tcount
files_tampered += 1
total_size += tsize
return [files_tampered, filescount, tamper_count, total_size]
|
python
|
{
"resource": ""
}
|
q10094
|
TrackedObject._save_trace
|
train
|
def _save_trace(self):
"""
Save current stack trace as formatted string.
"""
stack_trace = stack()
try:
self.trace = []
for frm in stack_trace[5:]: # eliminate our own overhead
self.trace.insert(0, frm[1:])
finally:
del stack_trace
|
python
|
{
"resource": ""
}
|
q10095
|
TrackedObject.track_size
|
train
|
def track_size(self, ts, sizer):
"""
Store timestamp and current size for later evaluation.
The 'sizer' is a stateful sizing facility that excludes other tracked
objects.
"""
obj = self.ref()
self.snapshots.append(
(ts, sizer.asized(obj, detail=self._resolution_level))
)
if obj is not None:
self.repr = safe_repr(obj, clip=128)
|
python
|
{
"resource": ""
}
|
q10096
|
PeriodicThread.run
|
train
|
def run(self):
"""
Loop until a stop signal is set.
"""
self.stop = False
while not self.stop:
self.tracker.create_snapshot()
sleep(self.interval)
|
python
|
{
"resource": ""
}
|
q10097
|
Snapshot.label
|
train
|
def label(self):
"""Return timestamped label for this snapshot, or a raw timestamp."""
if not self.desc:
return "%.3fs" % self.timestamp
return "%s (%.3fs)" % (self.desc, self.timestamp)
|
python
|
{
"resource": ""
}
|
q10098
|
ClassTracker._tracker
|
train
|
def _tracker(self, _observer_, _self_, *args, **kwds):
"""
Injected constructor for tracked classes.
Call the actual constructor of the object and track the object.
Attach to the object before calling the constructor to track the object with
the parameters of the most specialized class.
"""
self.track_object(_self_,
name=_observer_.name,
resolution_level=_observer_.detail,
keep=_observer_.keep,
trace=_observer_.trace)
_observer_.init(_self_, *args, **kwds)
|
python
|
{
"resource": ""
}
|
q10099
|
ClassTracker._inject_constructor
|
train
|
def _inject_constructor(self, cls, func, name, resolution_level, keep,
trace):
"""
Modifying Methods in Place - after the recipe 15.7 in the Python
Cookbook by Ken Seehof. The original constructors may be restored
later.
"""
try:
constructor = cls.__init__
except AttributeError:
def constructor(self, *_args, **_kwargs):
pass
# Possible name clash between keyword arguments of the tracked class'
# constructor and the curried arguments of the injected constructor.
# Therefore, the additional argument has a 'magic' name to make it less
# likely that an argument name clash occurs.
self._observers[cls] = _ClassObserver(constructor,
name,
resolution_level,
keep,
trace)
cls.__init__ = instancemethod(
lambda *args, **kwds: func(self._observers[cls], *args, **kwds),
None,
cls
)
|
python
|
{
"resource": ""
}
|