| _id (string) | title (string) | partition (string) | text (string) | language (string) | meta_information (dict) |
|---|---|---|---|---|---|
q5400
|
get_thread_block_dimensions
|
train
|
def get_thread_block_dimensions(params, block_size_names=None):
"""thread block size from tuning params, currently using convention"""
if not block_size_names:
block_size_names = default_block_size_names
block_size_x = params.get(block_size_names[0], 256)
block_size_y = params.get(block_size_names[1], 1)
block_size_z = params.get(block_size_names[2], 1)
return (int(block_size_x), int(block_size_y), int(block_size_z))
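# Usage sketch (hedged, assumes the module-level default_block_size_names
# is ["block_size_x", "block_size_y", "block_size_z"]):
#   params = {"block_size_x": 128, "block_size_y": 2}
#   get_thread_block_dimensions(params)  # -> (128, 2, 1), z falls back to 1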
|
python
|
{
"resource": ""
}
|
q5401
|
looks_like_a_filename
|
train
|
def looks_like_a_filename(kernel_source):
""" attempt to detect whether source code or a filename was passed """
logging.debug('looks_like_a_filename called')
result = False
if isinstance(kernel_source, str):
result = True
#test that the string is not too long to be a filename
if len(kernel_source) > 250:
result = False
#test that the string contains no special characters
for c in "();{}\\":
if c in kernel_source:
result = False
#safeguard against strings that look like source code
for s in ["__global__ ", "__kernel ", "void ", "float "]:
if s in kernel_source:
result = False
#string must contain substring ".c", ".opencl", or ".F"
result = result and any([s in kernel_source for s in (".c", ".opencl", ".F")])
logging.debug('kernel_source is a filename: %s' % str(result))
return result
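# Usage sketch (hedged): short names with a source-like extension pass, while
# code-looking strings do not; note the ".c" test also matches ".cu"/".cc".
#   looks_like_a_filename("matmul.cu")               # -> True
#   looks_like_a_filename("__global__ void k() {}")  # -> False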
|
python
|
{
"resource": ""
}
|
q5402
|
prepare_kernel_string
|
train
|
def prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names):
""" prepare kernel string for compilation
Prepends the kernel with a series of C preprocessor defines specific
to this kernel instance:
* the thread block dimensions
* the grid dimensions
* tunable parameters
Additionally, the name of the kernel is replaced with an instance-specific name. This
is done to prevent the kernel compilation from being skipped by PyCUDA and/or PyOpenCL,
which may use caching to save compilation time. That caching could lead to strange bugs
in the source code if the name of the kernel is also used for other purposes.
:param kernel_name: Name of the kernel.
:type kernel_name: string
:param kernel_string: One of the source files of the kernel as a string containing code.
:type kernel_string: string
:param params: A dictionary containing the tunable parameters specific to this instance.
:type params: dict
:param grid: A tuple with the grid dimensions for this specific instance.
:type grid: tuple(x,y,z)
:param threads: A tuple with the thread block dimensions for this specific instance.
:type threads: tuple(x,y,z)
:param block_size_names: A tuple with the names of the thread block dimensions used
in the code. By default this is ["block_size_x", ...], but the user
may supply different names if they prefer.
:type block_size_names: tuple(string)
:returns: A string containing the source code made specific to this kernel instance.
:rtype: string
"""
logging.debug('prepare_kernel_string called for %s', kernel_name)
grid_dim_names = ["grid_size_x", "grid_size_y", "grid_size_z"]
for i, g in enumerate(grid):
kernel_string = "#define " + grid_dim_names[i] + " " + str(g) + "\n" + kernel_string
for i, g in enumerate(threads):
kernel_string = "#define " + block_size_names[i] + " " + str(g) + "\n" + kernel_string
for k, v in params.items():
if k not in block_size_names:
kernel_string = "#define " + k + " " + str(v) + "\n" + kernel_string
name = kernel_name
#name = kernel_name + "_" + get_instance_string(params)
#kernel_string = kernel_string.replace(kernel_name, name)
return name, kernel_string
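# Illustrative sketch (hedged): with params={"tile": 4}, grid=(8, 8),
# threads=(32, 4, 1) and the default block size names, the returned
# kernel_string starts with:
#   #define tile 4
#   #define block_size_z 1
#   #define block_size_y 4
#   #define block_size_x 32
#   #define grid_size_y 8
#   #define grid_size_x 8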
|
python
|
{
"resource": ""
}
|
q5403
|
prepare_list_of_files
|
train
|
def prepare_list_of_files(kernel_name, kernel_file_list, params, grid, threads, block_size_names):
""" prepare the kernel string along with any additional files
The first file in the list is allowed to include or read in the others
The files beyond the first are considered additional files that may also contain tunable parameters
For each file beyond the first this function creates a temporary file with
preprocessor statements inserted. Occurrences of the original filenames in the
first file are replaced with their temporary counterparts.
:param kernel_file_list: A list of filenames. The first file in the list is
allowed to read or include the other files in the list. All files
will have access to the tunable parameters.
:type kernel_file_list: list(string)
:param params: A dictionary with the tunable parameters for this particular
instance.
:type params: dict()
:param grid: The grid dimensions for this instance. The grid dimensions are
also inserted into the code as if they are tunable parameters for
convenience.
:type grid: tuple()
"""
temp_files = dict()
kernel_string = get_kernel_string(kernel_file_list[0], params)
name, kernel_string = prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)
if len(kernel_file_list) > 1:
for f in kernel_file_list[1:]:
#generate temp filename with the same extension
temp_file = get_temp_filename(suffix="." + f.split(".")[-1])
temp_files[f] = temp_file
#add preprocessor statements to the additional file
_, temp_file_string = prepare_kernel_string(kernel_name, get_kernel_string(f, params), params, grid, threads, block_size_names)
write_file(temp_file, temp_file_string)
#replace occurrences of the additional file's name in the first kernel_string with the name of the temp file
kernel_string = kernel_string.replace(f, temp_file)
return name, kernel_string, temp_files
|
python
|
{
"resource": ""
}
|
q5404
|
read_file
|
train
|
def read_file(filename):
""" return the contents of the file named filename or None if file not found """
if os.path.isfile(filename):
with open(filename, 'r') as f:
return f.read()
|
python
|
{
"resource": ""
}
|
q5405
|
replace_param_occurrences
|
train
|
def replace_param_occurrences(string, params):
"""replace occurrences of the tuning params with their current value"""
for k, v in params.items():
string = string.replace(k, str(v))
return string
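# Usage sketch (hedged): replacement is plain substring substitution, so a
# parameter name that is a prefix of another identifier would also be rewritten.
#   replace_param_occurrences("i < block_size_x", {"block_size_x": 128})
#   # -> "i < 128"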
|
python
|
{
"resource": ""
}
|
q5406
|
setup_block_and_grid
|
train
|
def setup_block_and_grid(problem_size, grid_div, params, block_size_names=None):
"""compute problem size, thread block and grid dimensions for this kernel"""
threads = get_thread_block_dimensions(params, block_size_names)
current_problem_size = get_problem_size(problem_size, params)
grid = get_grid_dimensions(current_problem_size, params, grid_div, block_size_names)
return threads, grid
|
python
|
{
"resource": ""
}
|
q5407
|
write_file
|
train
|
def write_file(filename, string):
"""dump the contents of string to a file called filename"""
import sys
#ugly fix, hopefully we can find a better one
if sys.version_info[0] >= 3:
with open(filename, 'w', encoding="utf-8") as f:
f.write(string)
else:
with open(filename, 'w') as f:
f.write(string.encode("utf-8"))
|
python
|
{
"resource": ""
}
|
q5408
|
OpenCLFunctions.compile
|
train
|
def compile(self, kernel_name, kernel_string):
"""call the OpenCL compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The OpenCL kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: An OpenCL kernel that can be called directly.
:rtype: pyopencl.Kernel
"""
prg = cl.Program(self.ctx, kernel_string).build(options=self.compiler_options)
func = getattr(prg, kernel_name)
return func
|
python
|
{
"resource": ""
}
|
q5409
|
OpenCLFunctions.run_kernel
|
train
|
def run_kernel(self, func, gpu_args, threads, grid):
"""runs the OpenCL kernel passed as 'func'
:param func: An OpenCL Kernel
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int, int)
"""
global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
local_size = threads
event = func(self.queue, global_size, local_size, *gpu_args)
event.wait()
|
python
|
{
"resource": ""
}
|
q5410
|
weighted_choice
|
train
|
def weighted_choice(population):
"""Randomly select, fitness determines probability of being selected"""
random_number = random.betavariate(1, 2.5) #increased probability of selecting members early in the list
#random_number = random.random()
ind = int(random_number*len(population))
ind = min(max(ind, 0), len(population)-1)
return population[ind][0]
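# Usage sketch (hedged): population is assumed to be a list of (dna, fitness)
# tuples sorted best-first; betavariate(1, 2.5) skews the index toward 0,
# so fitter members are selected more often.
#   population = [([128, 2], 0.9), ([64, 4], 0.5), ([32, 8], 0.1)]
#   weighted_choice(population)  # most likely [128, 2]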
|
python
|
{
"resource": ""
}
|
q5411
|
random_population
|
train
|
def random_population(dna_size, pop_size, tune_params):
"""create a random population"""
population = []
for _ in range(pop_size):
dna = []
for i in range(dna_size):
dna.append(random_val(i, tune_params))
population.append(dna)
return population
|
python
|
{
"resource": ""
}
|
q5412
|
random_val
|
train
|
def random_val(index, tune_params):
"""return a random value for a parameter"""
key = list(tune_params.keys())[index]
return random.choice(tune_params[key])
|
python
|
{
"resource": ""
}
|
q5413
|
crossover
|
train
|
def crossover(dna1, dna2):
"""crossover dna1 and dna2 at a random index"""
pos = int(random.random()*len(dna1))
if random.random() < 0.5:
return (dna1[:pos]+dna2[pos:], dna2[:pos]+dna1[pos:])
else:
return (dna2[:pos]+dna1[pos:], dna1[:pos]+dna2[pos:])
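# Usage sketch (hedged): both children keep the original gene count.
#   random.seed(0)
#   crossover([1, 2, 3, 4], [5, 6, 7, 8])
#   # -> ([5, 6, 7, 4], [1, 2, 3, 8]) for this particular seed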
|
python
|
{
"resource": ""
}
|
q5414
|
_cost_func
|
train
|
def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ",".join([str(i) for i in x])
if x_key in cache:
return cache[x_key]
#snap values in x to the nearest actual value for each parameter, unscaling x if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ",".join([str(i) for i in params])
if x_int in cache:
return cache[x_int]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
#compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time']
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
|
python
|
{
"resource": ""
}
|
q5415
|
get_bounds
|
train
|
def get_bounds(tune_params):
""" create a bounds array from the tunable parameters """
bounds = []
for values in tune_params.values():
sorted_values = numpy.sort(values)
bounds.append((sorted_values[0], sorted_values[-1]))
return bounds
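# Usage sketch (hedged): one (min, max) pair per parameter, in the iteration
# order of tune_params (an OrderedDict is assumed).
#   tune_params = OrderedDict([("block_size_x", [32, 128, 64]), ("tile", [1, 2, 4])])
#   get_bounds(tune_params)  # -> [(32, 128), (1, 4)]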
|
python
|
{
"resource": ""
}
|
q5416
|
setup_method_options
|
train
|
def setup_method_options(method, tuning_options):
""" prepare method specific options """
kwargs = {}
#pass size of parameter space as max iterations to methods that support it
#it seems not all methods interpret this value in the same manner
maxiter = numpy.prod([len(v) for v in tuning_options.tune_params.values()])
kwargs['maxiter'] = maxiter
if method in ["Nelder-Mead", "Powell"]:
kwargs['maxfev'] = maxiter
elif method == "L-BFGS-B":
kwargs['maxfun'] = maxiter
#pass eps to methods that support it
if method in ["CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"]:
kwargs['eps'] = tuning_options.eps
elif method == "COBYLA":
kwargs['rhobeg'] = tuning_options.eps
return kwargs
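# Illustrative sketch (hedged): for a 3x4 parameter space maxiter is 12, and
# "L-BFGS-B" additionally receives maxfun and eps:
#   setup_method_options("L-BFGS-B", tuning_options)
#   # -> {'maxiter': 12, 'maxfun': 12, 'eps': tuning_options.eps}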
|
python
|
{
"resource": ""
}
|
q5417
|
snap_to_nearest_config
|
train
|
def snap_to_nearest_config(x, tune_params):
"""helper func that for each param selects the closest actual value"""
params = []
for i, k in enumerate(tune_params.keys()):
values = numpy.array(tune_params[k])
idx = numpy.abs(values-x[i]).argmin()
params.append(int(values[idx]))
return params
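# Usage sketch (hedged): each coordinate snaps to the closest allowed value.
#   snap_to_nearest_config([90], OrderedDict([("block_size_x", [32, 64, 128])]))
#   # -> [64]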
|
python
|
{
"resource": ""
}
|
q5418
|
unscale_and_snap_to_nearest
|
train
|
def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5*eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace-x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v)-1)
#use index into array of actual values
x_u[i] = v[idx]
return x_u
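# Worked sketch (hedged): with eps=1.0 and values [16, 32, 64], the interval
# [0, 3] maps to the points 0.5, 1.5, 2.5; x=1.2 is closest to 1.5, so the
# second value is selected.
#   unscale_and_snap_to_nearest([1.2], OrderedDict([("block_size_x", [16, 32, 64])]), 1.0)
#   # -> [32]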
|
python
|
{
"resource": ""
}
|
q5419
|
SequentialRunner.run
|
train
|
def run(self, parameter_space, kernel_options, tuning_options):
""" Iterate through the entire parameter space using a single Python process
:param parameter_space: The parameter space as an iterable.
:type parameter_space: iterable
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
logging.debug('sequential runner started for ' + kernel_options.kernel_name)
results = []
#iterate over parameter space
for element in parameter_space:
params = OrderedDict(zip(tuning_options.tune_params.keys(), element))
time = self.dev.compile_and_benchmark(self.gpu_args, params, kernel_options, tuning_options)
if time is None:
logging.debug('received time is None, kernel configuration was skipped silently due to compile or runtime failure')
continue
#print and append to results
params['time'] = time
output_string = get_config_string(params, self.units)
logging.debug(output_string)
if not self.quiet:
print(output_string)
results.append(params)
return results, self.dev.get_environment()
|
python
|
{
"resource": ""
}
|
q5420
|
allocate
|
train
|
def allocate(n, dtype=numpy.float32):
""" allocate context-portable pinned host memory """
return drv.pagelocked_empty(int(n), dtype, order='C', mem_flags=drv.host_alloc_flags.PORTABLE)
|
python
|
{
"resource": ""
}
|
q5421
|
CudaFunctions.compile
|
train
|
def compile(self, kernel_name, kernel_string):
"""call the CUDA compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The CUDA kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: A CUDA kernel that can be called directly.
:rtype: pycuda.driver.Function
"""
try:
no_extern_c = 'extern "C"' in kernel_string
compiler_options = ['-Xcompiler=-Wall']
if self.compiler_options:
compiler_options += self.compiler_options
self.current_module = self.source_mod(kernel_string, options=compiler_options + ["-e", kernel_name],
arch=('compute_' + self.cc) if self.cc != "00" else None,
code=('sm_' + self.cc) if self.cc != "00" else None,
cache_dir=False, no_extern_c=no_extern_c)
func = self.current_module.get_function(kernel_name)
return func
except drv.CompileError as e:
if "uses too much shared data" in e.stderr:
raise Exception("uses too much shared data")
else:
raise e
|
python
|
{
"resource": ""
}
|
q5422
|
CudaFunctions.copy_constant_memory_args
|
train
|
def copy_constant_memory_args(self, cmem_args):
"""adds constant memory arguments to the most recently compiled module
:param cmem_args: A dictionary containing the data to be passed to the
device constant memory. The format to be used is as follows: A
string key is used to name the constant memory symbol to which the
value needs to be copied. Similar to regular arguments, these need
to be numpy objects, such as numpy.ndarray or numpy.int32, and so on.
:type cmem_args: dict( string: numpy.ndarray, ... )
"""
logging.debug('copy_constant_memory_args called')
logging.debug('current module: ' + str(self.current_module))
for k, v in cmem_args.items():
symbol = self.current_module.get_global(k)[0]
logging.debug('copying to symbol: ' + str(symbol))
logging.debug('array to be copied: ')
logging.debug(v.nbytes)
logging.debug(v.dtype)
logging.debug(v.flags)
drv.memcpy_htod(symbol, v)
|
python
|
{
"resource": ""
}
|
q5423
|
CudaFunctions.copy_texture_memory_args
|
train
|
def copy_texture_memory_args(self, texmem_args):
"""adds texture memory arguments to the most recently compiled module
:param texmem_args: A dictionary containing the data to be passed to the
device texture memory. TODO
"""
filter_mode_map = { 'point': drv.filter_mode.POINT,
'linear': drv.filter_mode.LINEAR }
address_mode_map = { 'border': drv.address_mode.BORDER,
'clamp': drv.address_mode.CLAMP,
'mirror': drv.address_mode.MIRROR,
'wrap': drv.address_mode.WRAP }
logging.debug('copy_texture_memory_args called')
logging.debug('current module: ' + str(self.current_module))
self.texrefs = []
for k, v in texmem_args.items():
tex = self.current_module.get_texref(k)
self.texrefs.append(tex)
logging.debug('copying to texture: ' + str(k))
if not isinstance(v, dict):
data = v
else:
data = v['array']
logging.debug('texture to be copied: ')
logging.debug(data.nbytes)
logging.debug(data.dtype)
logging.debug(data.flags)
drv.matrix_to_texref(data, tex, order="C")
if isinstance(v, dict):
if 'address_mode' in v and v['address_mode'] is not None:
# address_mode is set per axis
amode = v['address_mode']
if not isinstance(amode, list):
amode = [ amode ] * data.ndim
for i, m in enumerate(amode):
try:
if m is not None:
tex.set_address_mode(i, address_mode_map[m])
except KeyError:
raise ValueError('Unknown address mode: ' + m)
if 'filter_mode' in v and v['filter_mode'] is not None:
fmode = v['filter_mode']
try:
tex.set_filter_mode(filter_mode_map[fmode])
except KeyError:
raise ValueError('Unknown filter mode: ' + fmode)
if 'normalized_coordinates' in v and v['normalized_coordinates']:
tex.set_flags(tex.get_flags() | drv.TRSF_NORMALIZED_COORDINATES)
|
python
|
{
"resource": ""
}
|
q5424
|
CudaFunctions.run_kernel
|
train
|
def run_kernel(self, func, gpu_args, threads, grid):
"""runs the CUDA kernel passed as 'func'
:param func: A PyCuda kernel compiled for this specific kernel configuration
:type func: pycuda.driver.Function
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pycuda.driver.DeviceAllocation, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
"""
func(*gpu_args, block=threads, grid=grid, texrefs=self.texrefs)
|
python
|
{
"resource": ""
}
|
q5425
|
CudaFunctions.memcpy_htod
|
train
|
def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory containing the data to be copied
:type src: numpy.ndarray
"""
if isinstance(dest, drv.DeviceAllocation):
drv.memcpy_htod(dest, src)
else:
dest = src
|
python
|
{
"resource": ""
}
|
q5426
|
acceptance_prob
|
train
|
def acceptance_prob(old_cost, new_cost, T):
"""annealing equation, with modifications to work towards a lower value"""
#if start pos is not valid, always move
if old_cost == 1e20:
return 1.0
#if we have found a valid position before, never move to an invalid position
if new_cost == 1e20:
return 0.0
#always move if new cost is better
if new_cost < old_cost:
return 1.0
#maybe move if old cost is better than new cost depending on T and random value
return np.exp(((old_cost-new_cost)/old_cost)/T)
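# Worked example (hedged): moving from old_cost=10.0 to new_cost=12.0 at
# T=1.0 gives np.exp(((10.0-12.0)/10.0)/1.0) = exp(-0.2) ~ 0.82, so a
# slightly worse configuration is still accepted with high probability.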
|
python
|
{
"resource": ""
}
|
q5427
|
neighbor
|
train
|
def neighbor(pos, tune_params):
"""return a random neighbor of pos"""
size = len(pos)
pos_out = []
# random mutation
# every dimension attempts a mutation: a random value with probability 0.2, an adjacent value otherwise
for i in range(size):
key = list(tune_params.keys())[i]
values = tune_params[key]
if random.random() < 0.2: #replace with random value
new_value = random_val(i, tune_params)
else: #adjacent value
ind = values.index(pos[i])
if random.random() > 0.5:
ind += 1
else:
ind -= 1
ind = min(max(ind, 0), len(values)-1)
new_value = values[ind]
pos_out.append(new_value)
return pos_out
|
python
|
{
"resource": ""
}
|
q5428
|
tune
|
train
|
def tune(runner, kernel_options, device_options, tuning_options):
""" Tune all instances in the parameter space
:param runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
tune_params = tuning_options.tune_params
restrictions = tuning_options.restrictions
verbose = tuning_options.verbose
#compute cartesian product of all tunable parameters
parameter_space = itertools.product(*tune_params.values())
#check for search space restrictions
if restrictions is not None:
parameter_space = filter(lambda p: util.check_restrictions(restrictions, p, tune_params.keys(), verbose), parameter_space)
results, env = runner.run(parameter_space, kernel_options, tuning_options)
return results, env
|
python
|
{
"resource": ""
}
|
q5429
|
CFunctions.ready_argument_list
|
train
|
def ready_argument_list(self, arguments):
"""ready argument list to be passed to the C function
:param arguments: List of arguments to be passed to the C function.
The order should match the argument list on the C function.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to the C function.
:rtype: list(Argument)
"""
ctype_args = [ None for _ in arguments]
for i, arg in enumerate(arguments):
if not isinstance(arg, (numpy.ndarray, numpy.number)):
raise TypeError("Argument is not numpy ndarray or numpy scalar %s" % type(arg))
dtype_str = str(arg.dtype)
data = arg.copy()
if isinstance(arg, numpy.ndarray):
if dtype_str in dtype_map.keys():
# In numpy <= 1.15, ndarray.ctypes.data_as does not itself keep a reference
# to its underlying array, so we need to store a reference to arg.copy()
# in the Argument object manually to avoid it being deleted.
# (This changed in numpy > 1.15.)
data_ctypes = data.ctypes.data_as(C.POINTER(dtype_map[dtype_str]))
else:
raise TypeError("unknown dtype for ndarray")
elif isinstance(arg, numpy.generic):
data_ctypes = dtype_map[dtype_str](arg)
ctype_args[i] = Argument(numpy=data, ctypes=data_ctypes)
return ctype_args
|
python
|
{
"resource": ""
}
|
q5430
|
CFunctions.compile
|
train
|
def compile(self, kernel_name, kernel_string):
"""call the C compiler to compile the kernel, return the function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The C code that contains the function `kernel_name`
:type kernel_string: string
:returns: A ctypes function that can be called directly.
:rtype: ctypes._FuncPtr
"""
logging.debug('compiling ' + kernel_name)
if self.lib is not None:
self.cleanup_lib()
compiler_options = ["-fPIC"]
#detect openmp
if "#include <omp.h>" in kernel_string or "use omp_lib" in kernel_string:
logging.debug('set using_openmp to true')
self.using_openmp = True
if self.compiler == "pgfortran":
compiler_options.append("-mp")
else:
compiler_options.append("-fopenmp")
#select right suffix based on compiler
suffix = ".cc"
#detect whether to use nvcc as default instead of g++, may overrule an explicitly passed g++
if ("#include <cuda" in kernel_string) or ("cudaMemcpy" in kernel_string):
if self.compiler == "g++" and self.nvcc_available:
self.compiler = "nvcc"
#if contains device code suffix .cu is required by nvcc
if self.compiler == "nvcc" and "__global__" in kernel_string:
suffix = ".cu"
if self.compiler in ["gfortran", "pgfortran", "ftn", "ifort"]:
suffix = ".F90"
if self.compiler == "nvcc":
compiler_options = ["-Xcompiler=" + c for c in compiler_options]
if ".c" in suffix:
if not "extern \"C\"" in kernel_string:
kernel_string = "extern \"C\" {\n" + kernel_string + "\n}"
#copy user specified compiler options to current list
if self.compiler_options:
compiler_options += self.compiler_options
lib_args = []
if "CL/cl.h" in kernel_string:
lib_args = ["-lOpenCL"]
logging.debug('using compiler ' + self.compiler)
logging.debug('compiler_options ' + " ".join(compiler_options))
logging.debug('lib_args ' + " ".join(lib_args))
source_file = get_temp_filename(suffix=suffix)
filename = ".".join(source_file.split(".")[:-1])
#detect Fortran modules
match = re.search(r"\s*module\s+([a-zA-Z_]*)", kernel_string)
if match:
if self.compiler == "gfortran":
kernel_name = "__" + match.group(1) + "_MOD_" + kernel_name
elif self.compiler in ["ftn", "ifort"]:
kernel_name = match.group(1) + "_mp_" + kernel_name + "_"
elif self.compiler == "pgfortran":
kernel_name = match.group(1) + "_" + kernel_name + "_"
try:
write_file(source_file, kernel_string)
lib_extension = ".so"
if platform.system() == "Darwin":
lib_extension = ".dylib"
subprocess.check_call([self.compiler, "-c", source_file] + compiler_options + ["-o", filename + ".o"])
subprocess.check_call([self.compiler, filename + ".o"] + compiler_options + ["-shared", "-o", filename + lib_extension] + lib_args)
self.lib = numpy.ctypeslib.load_library(filename, '.')
func = getattr(self.lib, kernel_name)
func.restype = C.c_float
finally:
delete_temp_file(source_file)
delete_temp_file(filename+".o")
delete_temp_file(filename+".so")
delete_temp_file(filename+".dylib")
return func
|
python
|
{
"resource": ""
}
|
q5431
|
CFunctions.benchmark
|
train
|
def benchmark(self, func, c_args, threads, grid, times):
"""runs the kernel repeatedly, returns averaged returned value
The C function tuning is a little bit more flexible than direct CUDA
or OpenCL kernel tuning. The C function needs to measure time, or some
other quality metric you wish to tune on, on its own and should
therefore return a single floating-point value.
Benchmark runs the C function repeatedly and returns the average of the
values returned by the C function. The number of iterations is set
during the creation of the CFunctions object. For all measurements the
lowest and highest values are discarded and the rest is included in the
average. The reason for this is to be robust against initialization
artifacts and other exceptional cases.
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:param times: Return the execution time of all iterations.
:type times: bool
:returns: All execution times, if times=True, or a robust average for the
kernel execution time.
:rtype: float
"""
time = []
for _ in range(self.iterations):
value = self.run_kernel(func, c_args, threads, grid)
#I would like to replace the following with actually capturing
#stderr and detecting the error directly in Python, it proved
#however that capturing stderr for non-Python functions from Python
#is a rather difficult thing to do
#
#The current, less than ideal, scheme uses the convention that a
#negative time indicates a 'too many resources requested for launch'
if value < 0.0:
raise Exception("too many resources requested for launch")
time.append(value)
time = sorted(time)
if times:
return time
else:
if self.iterations > 4:
return numpy.mean(time[1:-1])
else:
return numpy.mean(time)
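# Worked note (hedged): with iterations=7 and sorted times
# [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 9.0], the lowest and highest values are
# dropped and the reported average is mean([1.1 .. 1.5]) = 1.3.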
|
python
|
{
"resource": ""
}
|
q5432
|
CFunctions.run_kernel
|
train
|
def run_kernel(self, func, c_args, threads, grid):
"""runs the kernel once, returns whatever the kernel returns
:param func: A C function compiled for this specific configuration
:type func: ctypes._FuncPtr
:param c_args: A list of arguments to the function, order should match the
order in the code. The list should be prepared using
ready_argument_list().
:type c_args: list(Argument)
:param threads: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type threads: any
:param grid: Ignored, but left as argument for now to have the same
interface as CudaFunctions and OpenCLFunctions.
:type grid: any
:returns: A robust average of values returned by the C function.
:rtype: float
"""
logging.debug("run_kernel")
logging.debug("arguments=" + str([str(arg.ctypes) for arg in c_args]))
time = func(*[arg.ctypes for arg in c_args])
return time
|
python
|
{
"resource": ""
}
|
q5433
|
CFunctions.cleanup_lib
|
train
|
def cleanup_lib(self):
""" unload the previously loaded shared library """
if not self.using_openmp:
#this if statement is necessary because shared libraries that use
#OpenMP will core dump when unloaded; this is a well-known issue with OpenMP
logging.debug('unloading shared library')
_ctypes.dlclose(self.lib._handle)
|
python
|
{
"resource": ""
}
|
q5434
|
clock
|
train
|
def clock(rpc):
"""
This task runs forever and notifies all clients subscribed to
'clock' once a second.
"""
while True:
yield from rpc.notify('clock', str(datetime.datetime.now()))
yield from asyncio.sleep(1)
|
python
|
{
"resource": ""
}
|
q5435
|
patch_db_connections
|
train
|
def patch_db_connections():
"""
This wraps django.db.connections._connections with a TaskLocal object.
The Django transactions are only thread-safe, using threading.local,
and don't know about coroutines.
"""
global __already_patched
if not __already_patched:
from django.db import connections
connections._connections = local(connections._connections)
__already_patched = True
|
python
|
{
"resource": ""
}
|
q5436
|
decode_msg
|
train
|
def decode_msg(raw_msg):
"""
Decodes jsonrpc 2.0 raw message objects into JsonRpcMsg objects.
Examples:
Request:
{
"jsonrpc": "2.0",
"id": 1,
"method": "subtract",
"params": [42, 23]
}
Notification:
{
"jsonrpc": "2.0",
"method": "clock",
"params": "12:00",
}
Response:
{
"jsonrpc": "2.0",
"id": 1,
"result": 0,
}
Error:
{
"jsonrpc": "2.0",
"id": 1,
"error": {
"code": -32600,
"message": "Invalid request",
"data": null
}
}
"""
try:
msg_data = json.loads(raw_msg)
except ValueError:
raise RpcParseError
# check jsonrpc version
if 'jsonrpc' not in msg_data or not msg_data['jsonrpc'] == JSONRPC:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# check required fields
if not len(set(['error', 'result', 'method']) & set(msg_data)) == 1:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# find message type
if 'method' in msg_data:
if 'id' in msg_data and msg_data['id'] is not None:
msg_type = JsonRpcMsgTyp.REQUEST
else:
msg_type = JsonRpcMsgTyp.NOTIFICATION
elif 'result' in msg_data:
msg_type = JsonRpcMsgTyp.RESULT
elif 'error' in msg_data:
msg_type = JsonRpcMsgTyp.ERROR
# Request Objects
if msg_type in (JsonRpcMsgTyp.REQUEST, JsonRpcMsgTyp.NOTIFICATION):
# 'method' fields have to be strings
if type(msg_data['method']) is not str:
raise RpcInvalidRequestError
# set empty 'params' if not set
if 'params' not in msg_data:
msg_data['params'] = None
# set empty 'id' if not set
if 'id' not in msg_data:
msg_data['id'] = None
# Response Objects
if msg_type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
# every Response object has to define an id
if 'id' not in msg_data:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# Error objects
if msg_type == JsonRpcMsgTyp.ERROR:
# the error field has to be a dict
if type(msg_data['error']) is not dict:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# the error field has to define 'code' and 'message'
if not len(set(['code', 'message']) & set(msg_data['error'])) == 2:
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# the error code has to be in the specified ranges
if not msg_data['error']['code'] in RpcError.lookup_table.keys():
raise RpcInvalidRequestError(msg_id=msg_data.get('id', None))
# set empty 'data' field if not set
if 'data' not in msg_data['error']:
msg_data['error']['data'] = None
return JsonRpcMsg(msg_type, msg_data)
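# Usage sketch (hedged, assumes the surrounding module defines JSONRPC,
# JsonRpcMsg and JsonRpcMsgTyp as used above):
#   raw = '{"jsonrpc": "2.0", "id": 1, "method": "subtract", "params": [42, 23]}'
#   decode_msg(raw)  # -> JsonRpcMsg with type JsonRpcMsgTyp.REQUEST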
|
python
|
{
"resource": ""
}
|
q5437
|
switch_schema
|
train
|
def switch_schema(task, kwargs, **kw):
""" Switches schema of the task, before it has been run. """
# Lazily load needed functions, as they import django model functions which
# in turn load modules that need settings to be loaded and we can't
# guarantee this module was loaded when the settings were ready.
from .compat import get_public_schema_name, get_tenant_model
old_schema = (connection.schema_name, connection.include_public_schema)
setattr(task, '_old_schema', old_schema)
schema = (
get_schema_name_from_task(task, kwargs) or
get_public_schema_name()
)
# If the schema has not changed, don't do anything.
if connection.schema_name == schema:
return
if connection.schema_name != get_public_schema_name():
connection.set_schema_to_public()
if schema == get_public_schema_name():
return
tenant = get_tenant_model().objects.get(schema_name=schema)
connection.set_tenant(tenant, include_public=True)
|
python
|
{
"resource": ""
}
|
q5438
|
restore_schema
|
train
|
def restore_schema(task, **kwargs):
""" Switches the schema back to the one from before running the task. """
from .compat import get_public_schema_name
schema_name = get_public_schema_name()
include_public = True
if hasattr(task, '_old_schema'):
schema_name, include_public = task._old_schema
# If the schema names match, don't do anything.
if connection.schema_name == schema_name:
return
connection.set_schema(schema_name, include_public=include_public)
|
python
|
{
"resource": ""
}
|
q5439
|
node_link_data
|
train
|
def node_link_data(G, attrs=_attrs):
"""Return data in node-link format that is suitable for JSON serialization
and use in Javascript documents.
Parameters
----------
G : DyNetx graph
attrs : dict
A dictionary that contains three keys 'id', 'source' and 'target'.
The corresponding values provide the attribute names for storing
DyNetx-internal graph data. The values should be unique. Default
value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
data : dict
A dictionary with node-link formatted data.
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes are stored in this format. Note that
attribute keys will be converted to strings in order to comply with
JSON.
See Also
--------
node_link_graph
"""
id_ = attrs['id']
data = {}
data['directed'] = G.is_directed()
data['graph'] = G.graph
data['nodes'] = [dict(chain(G.node[n].items(), [(id_, n)])) for n in G]
data['links'] = []
for u, v, timeline in G.interactions_iter():
for t in timeline['t']:
for tid in past.builtins.xrange(t[0], t[-1]+1):
data['links'].append({"source": u, "target": v, "time": tid})
return data
|
python
|
{
"resource": ""
}
|
q5440
|
compact_timeslot
|
train
|
def compact_timeslot(sind_list):
"""
Test method. Compact all snapshots into a single one.
:param sind_list:
:return:
"""
tls = sorted(sind_list)
conversion = {val: idx for idx, val in enumerate(tls)}
return conversion
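# Usage sketch (hedged): snapshot ids are remapped to a dense 0-based range.
#   compact_timeslot([10, 3, 7])  # -> {3: 0, 7: 1, 10: 2}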
|
python
|
{
"resource": ""
}
|
q5441
|
DynGraph.nodes_iter
|
train
|
def nodes_iter(self, t=None, data=False):
"""Return an iterator over the nodes with respect to a given temporal snapshot.
Parameters
----------
t : snapshot id (default=None).
If None the iterator returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False the iterator returns nodes. If True
return a two-tuple of node and node data dictionary
Returns
-------
niter : iterator
An iterator over nodes. If data=True the iterator gives
two-tuples containing (node, node data dictionary)
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> [n for n, d in G.nodes_iter(t=0)]
[0, 1, 2]
"""
if t is not None:
return iter([n for n, d in self.degree(t=t).items() if d > 0])
return iter(self._node)
|
python
|
{
"resource": ""
}
|
q5442
|
DynGraph.nodes
|
train
|
def nodes(self, t=None, data=False):
"""Return a list of the nodes in the graph at a given snapshot.
Parameters
----------
t : snapshot id (default=None)
If None the method returns all the nodes of the flattened graph.
data : boolean, optional (default=False)
If False return a list of nodes. If True return a
two-tuple of node and node data dictionary
Returns
-------
nlist : list
A list of nodes. If data=True a list of two-tuples containing
(node, node data dictionary).
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], 0)
>>> G.nodes(t=0)
[0, 1, 2]
>>> G.add_edge(1, 4, t=1)
>>> G.nodes(t=0)
[0, 1, 2]
"""
return list(self.nodes_iter(t=t, data=data))
|
python
|
{
"resource": ""
}
|
q5443
|
DynGraph.add_interactions_from
|
train
|
def add_interactions_from(self, ebunch, t=None, e=None):
"""Add all the interaction in ebunch at time t.
Parameters
----------
ebunch : container of interactions
Each interaction given in the container will be added to the
graph. The interactions must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_interaction : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_interactions_from([(0,1),(1,2)], t=0)
"""
# set up attribute dict
if t is None:
raise nx.NetworkXError(
"The t argument must be a specified.")
# process ebunch
for ed in ebunch:
self.add_interaction(ed[0], ed[1], t, e)
|
python
|
{
"resource": ""
}
|
q5444
|
DynGraph.neighbors
|
train
|
def neighbors(self, n, t=None):
"""Return a list of the nodes connected to the node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, the neighbors of the node on the flattened graph are returned.
Returns
-------
nlist : list
A list of nodes that are adjacent to n.
Raises
------
NetworkXError
If the node n is not in the graph.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.neighbors(0, t=0)
[1]
>>> G.neighbors(0, t=1)
[]
"""
try:
if t is None:
return list(self._adj[n])
else:
return [i for i in self._adj[n] if self.__presence_test(n, i, t)]
except KeyError:
raise nx.NetworkXError("The node %s is not in the graph." % (n,))
|
python
|
{
"resource": ""
}
|
q5445
|
DynGraph.neighbors_iter
|
train
|
def neighbors_iter(self, n, t=None):
"""Return an iterator over all neighbors of node n at time t.
Parameters
----------
n : node
A node in the graph
t : snapshot id (default=None)
If None, an iterator over the neighbors of the node on the flattened graph is returned.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> [n for n in G.neighbors_iter(0, t=0)]
[1]
"""
try:
if t is None:
return iter(self._adj[n])
else:
return iter([i for i in self._adj[n] if self.__presence_test(n, i, t)])
except KeyError:
raise nx.NetworkXError("The node %s is not in the graph." % (n,))
|
python
|
{
"resource": ""
}
|
q5446
|
DynGraph.degree
|
train
|
def degree(self, nbunch=None, t=None):
"""Return the degree of a node or nodes at time t.
The node degree is the number of interactions adjacent to that node in a given time frame.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the degree of nodes on the flattened graph is returned.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.degree(0, t=0)
1
>>> G.degree([0,1], t=1)
{0: 0, 1: 0}
>>> list(G.degree([0,1], t=0).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.degree_iter(nbunch, t))[1]
else: # return a dict
return dict(self.degree_iter(nbunch, t))
|
python
|
{
"resource": ""
}
|
q5447
|
DynGraph.size
|
train
|
def size(self, t=None):
"""Return the number of edges at time t.
Parameters
----------
t : snapshot id (default=None)
If None, the size of the flattened graph is returned.
Returns
-------
nedges : int
The number of edges
See Also
--------
number_of_edges
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.size(t=0)
3
"""
s = sum(self.degree(t=t).values()) / 2
return int(s)
|
python
|
{
"resource": ""
}
|
q5448
|
DynGraph.number_of_nodes
|
train
|
def number_of_nodes(self, t=None):
"""Return the number of nodes in the t snpashot of a dynamic graph.
Parameters
----------
t : snapshot id (default=None)
If None return the number of nodes in the flattened graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order which is identical
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.number_of_nodes(0)
3
"""
if t is None:
return len(self._node)
else:
nds = sum([1 for n in self.degree(t=t).values() if n > 0])
return nds
|
python
|
{
"resource": ""
}
|
q5449
|
DynGraph.has_node
|
train
|
def has_node(self, n, t=None):
"""Return True if the graph, at time t, contains the node n.
Parameters
----------
n : node
t : snapshot id (default None)
If None return the presence of the node in the flattened graph.
Examples
--------
>>> G = dn.DynGraph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2], t=0)
>>> G.has_node(0, t=0)
True
It is more readable and simpler to use
>>> 0 in G
True
"""
if t is None:
try:
return n in self._node
except TypeError:
return False
else:
deg = list(self.degree([n], t).values())
if len(deg) > 0:
return deg[0] > 0
else:
return False
|
python
|
{
"resource": ""
}
|
q5450
|
DynGraph.to_directed
|
train
|
def to_directed(self):
"""Return a directed representation of the graph.
Returns
-------
G : DynDiGraph
A dynamic directed graph with the same name, same nodes, and with
each edge (u,v,data) replaced by two directed edges
(u,v,data) and (v,u,data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DynDiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning: If you have subclassed Graph to use dict-like objects in the
data structure, those changes do not transfer to the DynDiGraph
created by this method.
Examples
--------
>>> G = dn.DynGraph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = dn.DynDiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
from .dyndigraph import DynDiGraph
G = DynDiGraph()
G.name = self.name
G.add_nodes_from(self)
for it in self.interactions_iter():
for t in it[2]['t']:
G.add_interaction(it[0], it[1], t=t[0], e=t[1])
G.graph = deepcopy(self.graph)
G._node = deepcopy(self._node)
return G
|
python
|
{
"resource": ""
}
|
q5451
|
DynGraph.stream_interactions
|
train
|
def stream_interactions(self):
"""Generate a temporal ordered stream of interactions.
Returns
-------
nd_iter : an iterator
The iterator returns 4-tuples of (node, node, op, timestamp).
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([3,4,5,6], t=1)
>>> list(G.stream_interactions())
[(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)]
"""
timestamps = sorted(self.time_to_edge.keys())
for t in timestamps:
for e in self.time_to_edge[t]:
yield (e[0], e[1], e[2], t)
|
python
|
{
"resource": ""
}
|
q5452
|
DynGraph.interactions_per_snapshots
|
train
|
def interactions_per_snapshots(self, t=None):
"""Return the number of interactions within snapshot t.
Parameters
----------
t : snapshot id (default=None)
If None, the total number of interactions across all snapshots is returned.
Returns
-------
nd : dictionary, or number
A dictionary with snapshot ids as keys and interaction count as values or
a number if a single snapshot id is specified.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([0,4,5,6], t=1)
>>> G.add_path([7,1,2,3], t=2)
>>> G.interactions_per_snapshots(t=0)
3
>>> G.interactions_per_snapshots()
{0: 3, 1: 3, 2: 3}
"""
if t is None:
return {k: v / 2 for k, v in self.snapshots.items()}
else:
try:
return self.snapshots[t] / 2
except KeyError:
return 0
|
python
|
{
"resource": ""
}
|
q5453
|
DynDiGraph.in_interactions_iter
|
train
|
def in_interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the in interactions present in a given snapshot.
Edges are returned as tuples in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the in-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.in_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.in_interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
if nbunch is None:
nodes_nbrs_pred = self._pred.items()
else:
nodes_nbrs_pred = [(n, self._pred[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_pred:
for nbr in nbrs:
if t is not None:
if self.__presence_test(nbr, n, t):
yield (nbr, n, {"t": [t]})
else:
if nbr in self._pred[n]:
yield (nbr, n, self._pred[n][nbr])
|
python
|
{
"resource": ""
}
|
q5454
|
DynDiGraph.out_interactions_iter
|
train
|
def out_interactions_iter(self, nbunch=None, t=None):
"""Return an iterator over the out interactions present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, 0)
>>> G.add_interaction(1,2, 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.out_interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.out_interactions_iter())
[(0, 1), (1, 2), (2, 3)]
"""
if nbunch is None:
nodes_nbrs_succ = self._succ.items()
else:
nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)]
for n, nbrs in nodes_nbrs_succ:
for nbr in nbrs:
if t is not None:
if self.__presence_test(n, nbr, t):
yield (n, nbr, {"t": [t]})
else:
if nbr in self._succ[n]:
yield (n, nbr, self._succ[n][nbr])
|
python
|
{
"resource": ""
}
|
q5455
|
DynDiGraph.number_of_interactions
|
train
|
def number_of_interactions(self, u=None, v=None, t=None):
"""Return the number of interaction between two nodes at time t.
Parameters
----------
u, v : nodes, optional (default=all interaction)
If u and v are specified, return the number of interactions between
u and v. Otherwise return the total number of all interactions.
t : snapshot id (default=None)
If None, the number of edges on the flattened graph is returned.
Returns
-------
nedges : int
The number of interactions in the graph. If nodes u and v are specified
return the number of interactions between those nodes. If a single node is specified return None.
See Also
--------
size
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.number_of_interactions()
3
>>> G.number_of_interactions(0,1, t=0)
1
>>> G.add_edge(3, 4, t=1)
>>> G.number_of_interactions()
4
"""
if t is None:
if u is None:
return int(self.size())
elif u is not None and v is not None:
if v in self._succ[u]:
return 1
else:
return 0
else:
if u is None:
return int(self.size(t))
elif u is not None and v is not None:
if v in self._succ[u]:
if self.__presence_test(u, v, t):
return 1
else:
return 0
|
python
|
{
"resource": ""
}
|
q5456
|
DynDiGraph.in_degree
|
train
|
def in_degree(self, nbunch=None, t=None):
"""Return the in degree of a node or nodes at time t.
The node in degree is the number of incoming interactions to that node in a given time frame.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the degree of nodes on the flattened graph is returned.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, t=0)
>>> G.add_interaction(1,2, t=0)
>>> G.add_interaction(2,3, t=0)
>>> G.in_degree(0, t=0)
1
>>> G.in_degree([0,1], t=1)
{0: 0, 1: 0}
>>> list(G.in_degree([0,1], t=0).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.in_degree_iter(nbunch, t))[1]
else: # return a dict
return dict(self.in_degree_iter(nbunch, t))
|
python
|
{
"resource": ""
}
|
q5457
|
DynDiGraph.out_degree
|
train
|
def out_degree(self, nbunch=None, t=None):
"""Return the out degree of a node or nodes at time t.
The node degree is the number of interactions outgoing from that node in a given time frame.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
If None, the degree of nodes on the flattened graph is returned.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_interaction(0,1, t=0)
>>> G.add_interaction(1,2, t=0)
>>> G.add_interaction(2,3, t=0)
>>> G.out_degree(0, t=0)
1
>>> G.out_degree([0,1], t=1)
{0: 0, 1: 0}
>>> list(G.out_degree([0,1], t=0).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.out_degree_iter(nbunch, t))[1]
else: # return a dict
return dict(self.out_degree_iter(nbunch, t))
|
python
|
{
"resource": ""
}
|
q5458
|
DynDiGraph.to_undirected
|
train
|
def to_undirected(self, reciprocal=False):
"""Return an undirected representation of the dyndigraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original dyndigraph.
Returns
-------
G : DynGraph
An undirected dynamic graph with the same name and nodes and
with edge (u,v,data) if either (u,v,data) or (v,u,data)
is in the dyndigraph. If both edges exist in dyndigraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
Notes
-----
If edges in both directions (u,v) and (v,u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DynDiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning: If you have subclassed DiGraph to use dict-like objects
in the data structure, those changes do not transfer to the Graph
created by this method.
"""
from .dyngraph import DynGraph
H = DynGraph()
H.name = self.name
H.add_nodes_from(self)
if reciprocal is True:
for u in self._node:
for v in self._node:
if u >= v:
try:
outc = self._succ[u][v]['t']
intc = self._pred[u][v]['t']
for o in outc:
r = set(range(o[0], o[1]+1))
for i in intc:
r2 = set(range(i[0], i[1]+1))
inter = list(r & r2)
if len(inter) == 1:
H.add_interaction(u, v, t=inter[0])
elif len(inter) > 1:
H.add_interaction(u, v, t=inter[0], e=inter[-1])
except KeyError:
pass
else:
for it in self.interactions_iter():
for t in it[2]['t']:
H.add_interaction(it[0], it[1], t=t[0], e=t[1])
H.graph = deepcopy(self.graph)
H._node = deepcopy(self._node)
return H
|
python
|
{
"resource": ""
}
|
q5459
|
write_interactions
|
train
|
def write_interactions(G, path, delimiter=' ', encoding='utf-8'):
"""Write a DyNetx graph in interaction list format.
Parameters
----------
G : graph
A DyNetx graph.
path : basestring
The desired output filename
delimiter : character
Column delimiter
"""
for line in generate_interactions(G, delimiter):
line += '\n'
path.write(line.encode(encoding))
|
python
|
{
"resource": ""
}
|
q5460
|
read_interactions
|
train
|
def read_interactions(path, comments="#", directed=False, delimiter=None,
nodetype=None, timestamptype=None, encoding='utf-8', keys=False):
"""Read a DyNetx graph from interaction list format.
Parameters
----------
path : basestring
The input filename
delimiter : character
Column delimiter
"""
ids = None
lines = (line.decode(encoding) for line in path)
if keys:
ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype)
return parse_interactions(lines, comments=comments, directed=directed, delimiter=delimiter, nodetype=nodetype,
timestamptype=timestamptype, keys=ids)
|
python
|
{
"resource": ""
}
|
q5461
|
write_snapshots
|
train
|
def write_snapshots(G, path, delimiter=' ', encoding='utf-8'):
"""Write a DyNetx graph in snapshot graph list format.
Parameters
----------
G : graph
A DyNetx graph.
path : basestring
The desired output filename
delimiter : character
Column delimiter
"""
for line in generate_snapshots(G, delimiter):
line += '\n'
path.write(line.encode(encoding))
|
python
|
{
"resource": ""
}
|
q5462
|
read_snapshots
|
train
|
def read_snapshots(path, comments="#", directed=False, delimiter=None,
nodetype=None, timestamptype=None, encoding='utf-8', keys=False):
"""Read a DyNetx graph from snapshot graph list format.
Parameters
----------
path : basestring
The input filename
delimiter : character
Column delimiter
"""
ids = None
lines = (line.decode(encoding) for line in path)
if keys:
ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype)
return parse_snapshots(lines, comments=comments, directed=directed, delimiter=delimiter, nodetype=nodetype,
timestamptype=timestamptype, keys=ids)
|
python
|
{
"resource": ""
}
|
q5463
|
open_file
|
train
|
def open_file(path_arg, mode='r'):
"""Decorator to ensure clean opening and closing of files.
Parameters
----------
path_arg : int
Location of the path argument in args. Even if the argument is a
named positional argument (with a default value), you must specify its
index as a positional argument.
mode : str
String for opening mode.
Returns
-------
_open_file : function
Function which cleanly executes the io.
Examples
--------
Decorate functions like this::
@open_file(0,'r')
def read_function(pathname):
pass
@open_file(1,'w')
def write_function(G,pathname):
pass
@open_file(1,'w')
    def write_function(G, pathname='graph.dot'):
pass
@open_file('path', 'w+')
def another_function(arg, **kwargs):
path = kwargs['path']
pass
"""
# Note that this decorator solves the problem when a path argument is
# specified as a string, but it does not handle the situation when the
# function wants to accept a default of None (and then handle it).
# Here is an example:
#
# @open_file('path')
# def some_function(arg1, arg2, path=None):
# if path is None:
# fobj = tempfile.NamedTemporaryFile(delete=False)
# close_fobj = True
# else:
# # `path` could have been a string or file object or something
# # similar. In any event, the decorator has given us a file object
# # and it will close it for us, if it should.
# fobj = path
# close_fobj = False
#
# try:
# fobj.write('blah')
# finally:
# if close_fobj:
# fobj.close()
#
# Normally, we'd want to use "with" to ensure that fobj gets closed.
# However, recall that the decorator will make `path` a file object for
# us, and using "with" would undesirably close that file object. Instead,
# you use a try block, as shown above. When we exit the function, fobj will
# be closed, if it should be, by the decorator.
@decorator
def _open_file(func, *args, **kwargs):
# Note that since we have used @decorator, *args, and **kwargs have
# already been resolved to match the function signature of func. This
# means default values have been propagated. For example, the function
# func(x, y, a=1, b=2, **kwargs) if called as func(0,1,b=5,c=10) would
# have args=(0,1,1,5) and kwargs={'c':10}.
        # First we parse the arguments of the decorator. The path_arg could
        # be a positional argument or a keyword argument. Even if it is a
        # named positional argument with a default value, it is resolved
        # positionally here (see the docstring above).
try:
# path_arg is a required positional argument
# This works precisely because we are using @decorator
path = args[path_arg]
except TypeError:
# path_arg is a keyword argument. It is "required" in the sense
# that it must exist, according to the decorator specification,
# It can exist in `kwargs` by a developer specified default value
# or it could have been explicitly set by the user.
try:
path = kwargs[path_arg]
except KeyError:
# Could not find the keyword. Thus, no default was specified
# in the function signature and the user did not provide it.
msg = 'Missing required keyword argument: {0}'
raise nx.NetworkXError(msg.format(path_arg))
else:
is_kwarg = True
except IndexError:
# A "required" argument was missing. This can only happen if
# the decorator of the function was incorrectly specified.
# So this probably is not a user error, but a developer error.
msg = "path_arg of open_file decorator is incorrect"
raise nx.NetworkXError(msg)
else:
is_kwarg = False
# Now we have the path_arg. There are two types of input to consider:
# 1) string representing a path that should be opened
# 2) an already opened file object
if is_string_like(path):
ext = splitext(path)[1]
fobj = _dispatch_dict[ext](path, mode=mode)
close_fobj = True
elif hasattr(path, 'read'):
# path is already a file-like object
fobj = path
close_fobj = False
else:
# could be None, in which case the algorithm will deal with it
fobj = path
close_fobj = False
# Insert file object into args or kwargs.
if is_kwarg:
new_args = args
kwargs[path_arg] = fobj
else:
# args is a tuple, so we must convert to list before modifying it.
new_args = list(args)
new_args[path_arg] = fobj
# Finally, we call the original function, making sure to close the fobj.
try:
result = func(*new_args, **kwargs)
finally:
if close_fobj:
fobj.close()
return result
return _open_file
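# Editor's note: `_dispatch_dict` (used above to pick an opener by file
# extension) is not shown in this record. A hedged sketch of its likely
# shape, modelled on the NetworkX original, is:
#
#   import gzip, bz2
#   from collections import defaultdict
#   _dispatch_dict = defaultdict(lambda: open,
#                                {'.gz': gzip.open, '.bz2': bz2.BZ2File})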
|
python
|
{
"resource": ""
}
|
q5464
|
number_of_interactions
|
train
|
def number_of_interactions(G, u=None, v=None, t=None):
"""Return the number of edges between two nodes at time t.
Parameters
----------
u, v : nodes, optional (default=all edges)
If u and v are specified, return the number of edges between
u and v. Otherwise return the total number of all edges.
t : snapshot id (default=None)
        If None, the number of edges on the flattened graph is returned.
Returns
-------
nedges : int
The number of edges in the graph. If nodes u and v are specified
return the number of edges between those nodes.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> dn.number_of_interactions(G, t=0)
"""
return G.number_of_interactions(u, v, t)
|
python
|
{
"resource": ""
}
|
q5465
|
density
|
train
|
def density(G, t=None):
r"""Return the density of a graph at timestamp t.
The density for undirected graphs is
.. math::
d = \frac{2m}{n(n-1)},
and for directed graphs is
.. math::
d = \frac{m}{n(n-1)},
where `n` is the number of nodes and `m` is the number of edges in `G`.
Parameters
----------
    G : Graph object
DyNetx graph object
t : snapshot id (default=None)
If None the density will be computed on the flattened graph.
Notes
-----
The density is 0 for a graph without edges and 1 for a complete graph.
Self loops are counted in the total number of edges so graphs with self
loops can have density higher than 1.
"""
n = number_of_nodes(G, t)
m = number_of_interactions(G, t)
if m == 0 or m is None or n <= 1:
return 0
d = m / (n * (n - 1))
if not G.is_directed():
d *= 2
return d
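# Editor's note: a quick, library-free arithmetic check of the formula
# above. For an undirected path 0-1-2-3 at a single snapshot: n = 4
# nodes and m = 3 edges, so d = 2m / (n(n-1)) = 6 / 12 = 0.5.
n, m = 4, 3
assert 2.0 * m / (n * (n - 1)) == 0.5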
|
python
|
{
"resource": ""
}
|
q5466
|
degree_histogram
|
train
|
def degree_histogram(G, t=None):
"""Return a list of the frequency of each degree value.
Parameters
----------
    G : Graph object
DyNetx graph object
t : snapshot id (default=None)
snapshot id
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
"""
counts = Counter(d for n, d in G.degree(t=t).items())
return [counts.get(i, 0) for i in range(max(counts) + 1)]
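# Editor's note: the Counter-based binning above, demonstrated on a
# plain degree mapping (a stand-in for G.degree(t=t)) so that it runs
# without a graph object.
from collections import Counter
degrees = {'a': 1, 'b': 2, 'c': 2, 'd': 1}
counts = Counter(degrees.values())
assert [counts.get(i, 0) for i in range(max(counts) + 1)] == [0, 2, 2]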
|
python
|
{
"resource": ""
}
|
q5467
|
freeze
|
train
|
def freeze(G):
"""Modify graph to prevent further change by adding or removing nodes or edges.
Node and edge data can still be modified.
Parameters
----------
G : graph
A NetworkX graph
Notes
-----
To "unfreeze" a graph you must make a copy by creating a new graph object.
See Also
--------
is_frozen
"""
G.add_node = frozen
G.add_nodes_from = frozen
G.remove_node = frozen
G.remove_nodes_from = frozen
G.add_edge = frozen
G.add_edges_from = frozen
G.remove_edge = frozen
G.remove_edges_from = frozen
G.clear = frozen
G.frozen = True
return G
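# Editor's note: a hedged usage sketch; `frozen` (assigned above) is
# assumed to be a module-level helper that raises on any mutation
# attempt, as in NetworkX.
#
#   G = freeze(G)
#   G.add_node(1)       # raises, e.g. "Frozen graph can't be modified"
#   H = type(G)(G)      # "unfreezing" = copying into a new graph object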
|
python
|
{
"resource": ""
}
|
q5468
|
set_node_attributes
|
train
|
def set_node_attributes(G, values, name=None):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`.
"""
# Set node attributes based on type of `values`
if name is not None: # `values` must not be a dict of dict
try: # `values` is a dict
for n, v in values.items():
try:
                    G.node[n][name] = v
except KeyError:
pass
except AttributeError: # `values` is a constant
for n in G:
G.node[n][name] = values
else: # `values` must be dict of dict
for n, d in values.items():
try:
G.node[n].update(d)
except KeyError:
pass
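# Editor's note: a hedged sketch of the three accepted shapes of
# `values`, assuming a DyNetx graph `G` whose nodes include 0 and 1.
#
#   set_node_attributes(G, {0: 'red', 1: 'blue'}, name='color')   # dict
#   set_node_attributes(G, 'red', name='color')                   # constant
#   set_node_attributes(G, {0: {'color': 'red', 'size': 3}})      # dict of dicts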
|
python
|
{
"resource": ""
}
|
q5469
|
get_node_attributes
|
train
|
def get_node_attributes(G, name):
"""Get node attributes from graph
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
"""
return {n: d[name] for n, d in G.node.items() if name in d}
|
python
|
{
"resource": ""
}
|
q5470
|
all_neighbors
|
train
|
def all_neighbors(graph, node, t=None):
""" Returns all of the neighbors of a node in the graph at time t.
If the graph is directed returns predecessors as well as successors.
Parameters
----------
graph : DyNetx graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
t : snapshot id (default=None)
If None the neighbors are identified on the flattened graph.
Returns
-------
neighbors : iterator
Iterator of neighbors
"""
if graph.is_directed():
values = chain(graph.predecessors(node, t=t), graph.successors(node, t=t))
else:
values = graph.neighbors(node, t=t)
return values
|
python
|
{
"resource": ""
}
|
q5471
|
non_neighbors
|
train
|
def non_neighbors(graph, node, t=None):
"""Returns the non-neighbors of the node in the graph at time t.
Parameters
----------
graph : DyNetx graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
t : snapshot id (default=None)
If None the non-neighbors are identified on the flattened graph.
Returns
-------
non_neighbors : iterator
Iterator of nodes in the graph that are not neighbors of the node.
"""
if graph.is_directed():
values = chain(graph.predecessors(node, t=t), graph.successors(node, t=t))
else:
values = graph.neighbors(node, t=t)
nbors = set(values) | {node}
return (nnode for nnode in graph if nnode not in nbors)
|
python
|
{
"resource": ""
}
|
q5472
|
non_interactions
|
train
|
def non_interactions(graph, t=None):
"""Returns the non-existent edges in the graph at time t.
Parameters
----------
graph : NetworkX graph.
Graph to find non-existent edges.
t : snapshot id (default=None)
If None the non-existent edges are identified on the flattened graph.
Returns
-------
non_edges : iterator
Iterator of edges that are not in the graph.
"""
# if graph.is_directed():
# for u in graph:
# for v in non_neighbors(graph, u, t):
# yield (u, v)
#else:
nodes = set(graph)
while nodes:
u = nodes.pop()
for v in nodes - set(graph[u]):
yield (u, v)
|
python
|
{
"resource": ""
}
|
q5473
|
FunctionTimedOut.getMsg
|
train
|
def getMsg(self):
'''
        getMsg - Generate a default message based on the parameters of the FunctionTimedOut exception
@return <str> - Message
'''
return 'Function %s (args=%s) (kwargs=%s) timed out after %f seconds.\n' %(self.timedOutFunction.__name__, repr(self.timedOutArgs), repr(self.timedOutKwargs), self.timedOutAfter)
|
python
|
{
"resource": ""
}
|
q5474
|
FunctionTimedOut.retry
|
train
|
def retry(self, timeout=RETRY_SAME_TIMEOUT):
'''
retry - Retry the timed-out function with same arguments.
@param timeout <float/RETRY_SAME_TIMEOUT/None> Default RETRY_SAME_TIMEOUT
If RETRY_SAME_TIMEOUT : Will retry the function same args, same timeout
If a float/int : Will retry the function same args with provided timeout
If None : Will retry function same args no timeout
@return - Returnval from function
'''
if timeout is None:
return self.timedOutFunction(*(self.timedOutArgs), **self.timedOutKwargs)
from .dafunc import func_timeout
if timeout == RETRY_SAME_TIMEOUT:
timeout = self.timedOutAfter
return func_timeout(timeout, self.timedOutFunction, args=self.timedOutArgs, kwargs=self.timedOutKwargs)
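# Editor's note: a hedged usage sketch; `slow_function` and `x` are
# placeholders, and func_timeout is assumed to raise FunctionTimedOut
# on expiry (its documented behaviour).
#
#   try:
#       result = func_timeout(2.0, slow_function, args=(x,))
#   except FunctionTimedOut as e:
#       result = e.retry(timeout=10.0)   # same args, longer timeout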
|
python
|
{
"resource": ""
}
|
q5475
|
JoinThread.run
|
train
|
def run(self):
'''
run - The thread main. Will attempt to stop and join the attached thread.
'''
# Try to silence default exception printing.
self.otherThread._Thread__stderr = self._stderr
if hasattr(self.otherThread, '_Thread__stop'):
# If py2, call this first to start thread termination cleanly.
            # Python 3 does not need this (nor does it provide it).
self.otherThread._Thread__stop()
while self.otherThread.isAlive():
            # We loop, re-raising the exception in case it's caught; hopefully this breaks us out of any handler.
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.otherThread.ident), ctypes.py_object(self.exception))
self.otherThread.join(self.repeatEvery)
try:
self._stderr.close()
except:
pass
|
python
|
{
"resource": ""
}
|
q5476
|
PDFIntegrator._make_map
|
train
|
def _make_map(self, limit):
""" Make vegas grid that is adapted to the pdf. """
ny = 2000
y = numpy.random.uniform(0., 1., (ny,1))
limit = numpy.arctan(limit)
m = AdaptiveMap([[-limit, limit]], ninc=100)
theta = numpy.empty(y.shape, float)
jac = numpy.empty(y.shape[0], float)
for itn in range(10):
m.map(y, theta, jac)
tan_theta = numpy.tan(theta[:, 0])
x = self.scale * tan_theta
fx = (tan_theta ** 2 + 1) * numpy.exp(-(x ** 2) / 2.)
m.add_training_data(y, (jac * fx) ** 2)
m.adapt(alpha=1.5)
return numpy.array(m.grid[0])
|
python
|
{
"resource": ""
}
|
q5477
|
PDFIntegrator._expval
|
train
|
def _expval(self, f, nopdf):
""" Return integrand using the tan mapping. """
def ff(theta, nopdf=nopdf):
tan_theta = numpy.tan(theta)
x = self.scale * tan_theta
jac = self.scale * (tan_theta ** 2 + 1.)
if nopdf:
pdf = jac * self.pdf.pjac[None, :]
else:
pdf = jac * numpy.exp(-(x ** 2) / 2.) / numpy.sqrt(2 * numpy.pi)
dp = self.pdf.x2dpflat(x)
parg = None
ans = None
fparg_is_dict = False
# iterate through the batch
for i, (dpi, pdfi) in enumerate(zip(dp, pdf)):
p = self.pdf.meanflat + dpi
if parg is None:
# first time only
if self.pdf.shape is None:
parg = _gvar.BufferDict(self.pdf.g, buf=p)
else:
parg = p.reshape(self.pdf.shape)
else:
if parg.shape is None:
parg.buf = p
else:
parg.flat[:] = p
fparg = 1. if f is None else f(parg)
if ans is None:
# first time only
if hasattr(fparg, 'keys'):
fparg_is_dict = True
if not isinstance(fparg, _gvar.BufferDict):
fparg = _gvar.BufferDict(fparg)
ans = _gvar.BufferDict()
for k in fparg:
ans[k] = numpy.empty(
(len(pdf),) + fparg.slice_shape(k)[1], float
)
else:
if numpy.shape(fparg) == ():
ans = numpy.empty(len(pdf), float)
else:
ans = numpy.empty(
(len(pdf),) + numpy.shape(fparg), float
)
if fparg_is_dict:
prod_pdfi = numpy.prod(pdfi)
for k in ans:
ans[k][i] = fparg[k]
ans[k][i] *= prod_pdfi
else:
if not isinstance(fparg, numpy.ndarray):
fparg = numpy.asarray(fparg)
ans[i] = fparg * numpy.prod(pdfi)
return ans
return ff
|
python
|
{
"resource": ""
}
|
q5478
|
REPL.dump
|
train
|
def dump(self, function_name):
"""
Pretty-dump the bytecode for the function with the given name.
"""
assert isinstance(function_name, str)
self.stdout.write(function_name)
self.stdout.write("\n")
self.stdout.write("-" * len(function_name))
self.stdout.write("\n\n")
byte_code = self.interpreter.compiled_functions[function_name]
self.stdout.write(byte_code.dump())
self.stdout.write("\n")
|
python
|
{
"resource": ""
}
|
q5479
|
Command.handle
|
train
|
def handle(self, **options):
"""Call "startapp" to generate app with custom user model."""
template = os.path.dirname(os.path.abspath(__file__)) + "/app_template"
name = options.pop("name")
call_command("startapp", name, template=template, **options)
|
python
|
{
"resource": ""
}
|
q5480
|
BaseUserManager._create_user
|
train
|
def _create_user(self, email, password, **extra_fields):
"""Create and save a User with the given email and password."""
if not email:
raise ValueError("The given email must be set")
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
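# Editor's note: a hedged sketch of the public wrappers that usually
# sit on top of this helper in a Django custom-user manager. The flag
# defaults are conventional, not taken from this source.
#
#   def create_user(self, email, password=None, **extra_fields):
#       extra_fields.setdefault("is_staff", False)
#       extra_fields.setdefault("is_superuser", False)
#       return self._create_user(email, password, **extra_fields)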
|
python
|
{
"resource": ""
}
|
q5481
|
nwise
|
train
|
def nwise(iterable, n):
"""
Iterate through a sequence with a defined length window
>>> list(nwise(range(8), 3))
    [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6), (5, 6, 7)]
>>> list(nwise(range(3), 5))
[]
Parameters
----------
iterable
n : length of each sequence
Yields
------
Tuples of length n
"""
iters = itertools.tee(iterable, n)
iters = (itertools.islice(it, i, None) for i, it in enumerate(iters))
return itertools.izip(*iters)
|
python
|
{
"resource": ""
}
|
q5482
|
step
|
train
|
def step(g, n1, n2, inbound=False, backward=False, continue_fn=None):
"""
Step along a path through a directed graph unless there is an intersection
Example graph:
Note that edge (1, 2) and (2, 3) are bidirectional, i.e., (2, 1) and
(3, 2) are also edges
1 -- 2 -- 3 -->-- 5 -->-- 7
| |
^ v
| |
4 6
>>> step(g, 1, 2)
3
>>> step(g, 3, 5)
None
>>> step(g, 2, 3)
5
>>> step(g, 2, 3, inbound=True)
None
    >>> step(g, 7, 5, backward=True)
3
    >>> def f(g, n1, n2, backward):
    ...     if n2 == 5:
    ...         return 7
    ...     return None
>>> step(g, 3, 5, continue_fn=f)
7
Parameters
----------
g : networkx DiGraph
n1 : node id in g
n2 : node id in g
(n1, n2) must be an edge in g
inbound : bool (default False)
whether incoming edges should be considered
backward : bool (default False)
whether edges are in reverse order (i.e., point from n2 to n1)
continue_fn : callable (optional)
if at an intersection, continue_fn is called to indicate how to
proceed
continue_fn takes the form:
f(g, n1, n2, backward) where all arguments are as passed into step.
        f should return a node id such that f(g, n1, n2, backward) is a
        successor of n2, or None if there is no way forward.
Returns
-------
The next node in the path from n1 to n2. Returns None if there
are no edges from n2 or multiple edges from n2
"""
forw = g.successors
back = g.predecessors
if backward:
back, forw = forw, back
nodes = forw(n2)
if inbound:
nodes = set(nodes + back(n2))
candidates = [n for n in nodes if n != n1]
if len(candidates) == 1:
result = candidates[0]
elif continue_fn:
result = continue_fn(g, n1, n2, backward)
else:
result = None
return result
|
python
|
{
"resource": ""
}
|
q5483
|
move
|
train
|
def move(g, n1, n2, **kwargs):
"""
    Step along a graph until it ends or reaches an intersection
Example graph:
Note that edge (1, 2) and (2, 3) are bidirectional, i.e., (2, 1) and
(3, 2) are also edges
1 -- 2 -- 3 -->-- 5 -->-- 7
| |
^ v
| |
4 6
>>> list(move(g, 1, 2))
    [1, 2, 3, 5] # Stops at 5 because you can get to both 6 and 7 from 5
    >>> list(move(g, 1, 2, inbound=True))
    [1, 2, 3]
Parameters
----------
Same as step()
Yields
------
Node IDs until either there is no path forward or the path reaches
an intersection
"""
prev = n1
curr = n2
_next = step(g, prev, curr, **kwargs)
yield prev
yield curr
visited_nodes = set([prev, curr])
while _next:
yield _next
if _next in visited_nodes:
return
visited_nodes.add(_next)
prev = curr
curr = _next
_next = step(g, prev, curr, **kwargs)
|
python
|
{
"resource": ""
}
|
q5484
|
is_intersection
|
train
|
def is_intersection(g, n):
"""
Determine if a node is an intersection
graph: 1 -->-- 2 -->-- 3
>>> is_intersection(g, 2)
False
graph:
1 -- 2 -- 3
|
4
>>> is_intersection(g, 2)
True
Parameters
----------
g : networkx DiGraph
n : node id
Returns
-------
bool
"""
return len(set(g.predecessors(n) + g.successors(n))) > 2
|
python
|
{
"resource": ""
}
|
q5485
|
LazyModel.as_dict
|
train
|
def as_dict(self):
"""
Returns the model as a dict
"""
if not self._is_valid:
self.validate()
from .converters import to_dict
return to_dict(self)
|
python
|
{
"resource": ""
}
|
q5486
|
GraphImporter.coords_callback
|
train
|
def coords_callback(self, data):
""" Callback for nodes that have no tags """
for node_id, lon, lat in data:
self.coords[node_id] = (lon, lat)
|
python
|
{
"resource": ""
}
|
q5487
|
GraphImporter.nodes_callback
|
train
|
def nodes_callback(self, data):
""" Callback for nodes with tags """
for node_id, tags, coords in data:
# Discard the coords because they go into add_coords
self.nodes[node_id] = tags
|
python
|
{
"resource": ""
}
|
q5488
|
GraphImporter.ways_callback
|
train
|
def ways_callback(self, data):
""" Callback for all ways """
for way_id, tags, nodes in data:
# Imposm passes all ways through regardless of whether the tags
# have been filtered or not. It needs to do this in order to
# handle relations, but we don't care about relations at the
# moment.
if tags:
self.ways[way_id] = (tags, nodes)
|
python
|
{
"resource": ""
}
|
q5489
|
GraphImporter.get_graph
|
train
|
def get_graph(self, parse_direction=False):
""" Return the networkx directed graph of received data """
g = nx.DiGraph()
for way_id, (tags, nodes) in self.ways.items():
# If oneway is '-1', reverse the way and treat as a normal oneway
if tags.get('oneway') == '-1':
nodes = reversed(nodes)
tags['oneway'] = 'yes'
oneway = tags.get('oneway') == 'yes'
for n0, n1 in tools.pairwise(nodes):
g.add_edge(n0, n1, attr_dict=tags)
if parse_direction:
g[n0][n1]['_direction'] = 'forward'
if not oneway:
g.add_edge(n1, n0, attr_dict=tags)
if parse_direction:
g[n1][n0]['_direction'] = 'backward'
g.node[n0].update(self._node_properties(n0))
g.node[n1].update(self._node_properties(n1))
return g
|
python
|
{
"resource": ""
}
|
q5490
|
parse_file
|
train
|
def parse_file(filename, parse_direction=False, **kwargs):
"""
Return an OSM networkx graph from the input OSM file
Only works with OSM xml, xml.bz2 and pbf files. This function cannot take
OSM QA tile files. Use parse_qa_tile() for QA tiles.
>>> graph = parse_file(filename)
"""
importer, parser = make_importer_parser(OSMParser, **kwargs)
parser.parse(filename)
return importer.get_graph(parse_direction=parse_direction)
|
python
|
{
"resource": ""
}
|
q5491
|
parse_data
|
train
|
def parse_data(data, type, **kwargs):
"""
Return an OSM networkx graph from the input OSM data
Parameters
----------
data : string
type : string ('xml' or 'pbf')
>>> graph = parse_data(data, 'xml')
"""
suffixes = {
'xml': '.osm',
'pbf': '.pbf',
}
try:
suffix = suffixes[type]
except KeyError:
raise ValueError('Unknown data type "%s"' % type)
fd, filename = tempfile.mkstemp(suffix=suffix)
try:
os.write(fd, data)
os.close(fd)
return parse_file(filename, **kwargs)
finally:
os.remove(filename)
|
python
|
{
"resource": ""
}
|
q5492
|
parse_qa_tile
|
train
|
def parse_qa_tile(x, y, zoom, data, parse_direction=False, **kwargs):
"""
Return an OSM networkx graph from the input OSM QA tile data
Parameters
----------
    x : int
        tile's x coordinate
    y : int
        tile's y coordinate
    zoom : int
        tile's zoom level
    data : string
    >>> graph = parse_qa_tile(1239, 1514, 12, data)
"""
import osmqa
importer, parser = make_importer_parser(osmqa.QATileParser, **kwargs)
parser.parse_data(x, y, zoom, data)
return importer.get_graph(parse_direction=parse_direction)
|
python
|
{
"resource": ""
}
|
q5493
|
_basename
|
train
|
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
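# Editor's note: a small runnable check of the tuple this returns.
from pathlib import Path
path, name, ext = _basename("data/cast.cnv")
assert (path, name, ext) == (Path("data"), "cast", ".cnv")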
|
python
|
{
"resource": ""
}
|
q5494
|
from_btl
|
train
|
def from_btl(fname):
"""
DataFrame constructor to open Seabird CTD BTL-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="btl")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=False,
names=metadata["names"],
parse_dates=False,
skiprows=metadata["skiprows"],
)
f.close()
# At this point the data frame is not correctly lined up (multiple rows
# for avg, std, min, max or just avg, std, etc).
# Also needs date,time,and bottle number to be converted to one per line.
# Get row types, see what you have: avg, std, min, max or just avg, std.
rowtypes = df[df.columns[-1]].unique()
    # Get dates (first row of each bottle block) and times (second row).
dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True)
times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True)
datetimes = dates + " " + times
# Fill the Date column with datetimes.
df.loc[:: len(rowtypes), "Date"] = datetimes.values
df.loc[1 :: len(rowtypes), "Date"] = datetimes.values
# Fill missing rows.
df["Bottle"] = df["Bottle"].fillna(method="ffill")
df["Date"] = df["Date"].fillna(method="ffill")
df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg
name = _basename(fname)[1]
dtypes = {
"bpos": int,
"pumps": bool,
"flag": bool,
"Bottle": int,
"Scan": int,
"Statistic": str,
"Date": str,
}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
df["Date"] = pd.to_datetime(df["Date"])
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
|
python
|
{
"resource": ""
}
|
q5495
|
from_edf
|
train
|
def from_edf(fname):
"""
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
"""
f = _read_file(fname)
header, names = [], []
for k, line in enumerate(f.readlines()):
line = line.strip()
if line.startswith("Serial Number"):
serial = line.strip().split(":")[1].strip()
elif line.startswith("Latitude"):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(":")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
except (IndexError, ValueError):
lat = None
elif line.startswith("Longitude"):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(":")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
except (IndexError, ValueError):
lon = None
else:
header.append(line)
if line.startswith("Field"):
col, unit = [l.strip().lower() for l in line.split(":")]
names.append(unit.split()[0])
if line == "// Data":
skiprows = k + 1
break
f.seek(0)
df = pd.read_csv(
f,
header=None,
index_col=None,
names=names,
skiprows=skiprows,
delim_whitespace=True,
)
f.close()
df.set_index("depth", drop=True, inplace=True)
df.index.name = "Depth [m]"
name = _basename(fname)[1]
metadata = {
"lon": lon,
"lat": lat,
"name": str(name),
"header": "\n".join(header),
"serial": serial,
}
setattr(df, "_metadata", metadata)
return df
|
python
|
{
"resource": ""
}
|
q5496
|
from_cnv
|
train
|
def from_cnv(fname):
"""
DataFrame constructor to open Seabird CTD CNV-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['t090C'].plot_cast()
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="cnv")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=None,
names=metadata["names"],
skiprows=metadata["skiprows"],
delim_whitespace=True,
widths=[11] * len(metadata["names"]),
)
f.close()
key_set = False
prkeys = ["prDM", "prdM", "pr"]
for prkey in prkeys:
try:
df.set_index(prkey, drop=True, inplace=True)
key_set = True
except KeyError:
continue
if not key_set:
raise KeyError(
f"Could not find pressure field (supported names are {prkeys})."
)
df.index.name = "Pressure [dbar]"
name = _basename(fname)[1]
dtypes = {"bpos": int, "pumps": bool, "flag": bool}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
|
python
|
{
"resource": ""
}
|
q5497
|
extrap_sec
|
train
|
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
    the deep stations. The shadow region usually cannot be extrapolated via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
    depth : array_like
        Depth of each level
    w1, w2 : float
        Weights [0-1] given to the along-distance and along-depth
        extrapolations when blending the result
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data
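# Editor's note: a hedged sketch on a tiny synthetic section. NaNs mark
# the shadow zone under the shallow station that the routine fills in;
# with w1=1, w2=0 only the along-distance extrapolation is used.
#
#   import numpy as np
#   dist = np.array([0., 10., 20.])
#   depth = np.array([0., 100.])
#   data = np.array([[20., 19., 18.],
#                    [np.nan, 15., 14.]])
#   filled = extrap_sec(data, dist, depth, w1=1.0, w2=0)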
|
python
|
{
"resource": ""
}
|
q5498
|
gen_topomask
|
train
|
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
"""
Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
    lon : array
        Longitude of each station [decimal degrees east].
    lat : array
        Latitude of each station [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho (paloczy@gmail.com) -- October/2012
"""
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm
|
python
|
{
"resource": ""
}
|
q5499
|
plot_cast
|
train
|
def plot_cast(df, secondary_y=False, label=None, *args, **kwargs):
"""
Plot a CTD variable with the index in the y-axis instead of x-axis.
"""
ax = kwargs.pop("ax", None)
fignums = plt.get_fignums()
if ax is None and not fignums:
ax = plt.axes()
fig = ax.get_figure()
fig.set_size_inches((5.25, 6.75))
else:
ax = plt.gca()
fig = plt.gcf()
figsize = kwargs.pop("figsize", fig.get_size_inches())
fig.set_size_inches(figsize)
y_inverted = False
if not getattr(ax, "y_inverted", False):
setattr(ax, "y_inverted", True)
y_inverted = True
if secondary_y:
ax = ax.twiny()
xlabel = getattr(df, "name", None)
ylabel = getattr(df.index, "name", None)
if isinstance(df, pd.DataFrame):
labels = label if label else df.columns
for k, (col, series) in enumerate(df.iteritems()):
ax.plot(series, series.index, label=labels[k])
elif isinstance(df, pd.Series):
label = label if label else str(df.name)
ax.plot(df.values, df.index, label=label, *args, **kwargs)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if y_inverted and not secondary_y:
ax.invert_yaxis()
return ax
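# Editor's note: a hedged usage sketch; assumes `cast` is a pressure-
# indexed DataFrame such as the one produced by from_cnv above, and
# that 't090C'/'sal00' are among its columns.
#
#   ax = plot_cast(cast['t090C'])                      # temperature profile
#   ax2 = plot_cast(cast['sal00'], secondary_y=True)   # salinity, twin x-axis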
|
python
|
{
"resource": ""
}
|