Dataset columns (length ranges reported for string and list values):

    Column                      Type     Min     Max
    repository_name             string   7       55
    func_path_in_repository     string   4       223
    func_name                   string   1       134
    whole_func_string           string   75      104k
    language                    string   1 distinct value
    func_code_string            string   75      104k
    func_code_tokens            list     19      28.4k
    func_documentation_string   string   1       46.9k
    func_documentation_tokens   list     1       1.97k
    split_name                  string   1 distinct value
    func_code_url               string   87      315
ska-sa/montblanc
montblanc/util/__init__.py
redistribute_threads
def redistribute_threads(blockdimx, blockdimy, blockdimz, dimx, dimy, dimz):
    """
    Redistribute threads from the Z dimension towards the X dimension.
    Also clamp number of threads to the problem dimension size,
    if necessary
    """
    # Shift threads from the z dimension
    # into the y dimension
    while blockdimz > dimz:
        tmp = blockdimz // 2

        if tmp < dimz:
            break

        blockdimy *= 2
        blockdimz = tmp

    # Shift threads from the y dimension
    # into the x dimension
    while blockdimy > dimy:
        tmp = blockdimy // 2

        if tmp < dimy:
            break

        blockdimx *= 2
        blockdimy = tmp

    # Clamp the block dimensions
    # if necessary
    if dimx < blockdimx:
        blockdimx = dimx

    if dimy < blockdimy:
        blockdimy = dimy

    if dimz < blockdimz:
        blockdimz = dimz

    return blockdimx, blockdimy, blockdimz
python
[ "def", "redistribute_threads", "(", "blockdimx", ",", "blockdimy", ",", "blockdimz", ",", "dimx", ",", "dimy", ",", "dimz", ")", ":", "# Shift threads from the z dimension", "# into the y dimension", "while", "blockdimz", ">", "dimz", ":", "tmp", "=", "blockdimz", "//", "2", "if", "tmp", "<", "dimz", ":", "break", "blockdimy", "*=", "2", "blockdimz", "=", "tmp", "# Shift threads from the y dimension", "# into the x dimension", "while", "blockdimy", ">", "dimy", ":", "tmp", "=", "blockdimy", "//", "2", "if", "tmp", "<", "dimy", ":", "break", "blockdimx", "*=", "2", "blockdimy", "=", "tmp", "# Clamp the block dimensions", "# if necessary", "if", "dimx", "<", "blockdimx", ":", "blockdimx", "=", "dimx", "if", "dimy", "<", "blockdimy", ":", "blockdimy", "=", "dimy", "if", "dimz", "<", "blockdimz", ":", "blockdimz", "=", "dimz", "return", "blockdimx", ",", "blockdimy", ",", "blockdimz" ]
Redistribute threads from the Z dimension towards the X dimension. Also clamp number of threads to the problem dimension size, if necessary
[ "Redistribute", "threads", "from", "the", "Z", "dimension", "towards", "the", "X", "dimension", ".", "Also", "clamp", "number", "of", "threads", "to", "the", "problem", "dimension", "size", "if", "necessary" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L387-L426
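To make the redistribution concrete, a small worked example (the block and problem sizes below are hypothetical, not taken from montblanc):

# A 32x1x16 block for a problem of extent 100x100x1: threads fold from
# z into y (16 -> 1 in z, 1 -> 16 in y) and the 512-thread total is kept.
bx, by, bz = redistribute_threads(32, 1, 16, 100, 100, 1)
assert (bx, by, bz) == (32, 16, 1)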
ska-sa/montblanc
montblanc/util/__init__.py
register_default_dimensions
def register_default_dimensions(cube, slvr_cfg):
    """ Register the default dimensions for a RIME solver """

    import montblanc.src_types as mbs

    # Pull out the configuration options for the basics
    autocor = slvr_cfg['auto_correlations']

    ntime = 10
    na = 7
    nbands = 1
    nchan = 16
    npol = 4

    # Infer number of baselines from number of antenna
    nbl = nr_of_baselines(na, autocor)

    if npol != 4:
        raise ValueError("npol set to {}, but only 4 polarisations "
                         "are currently supported.".format(npol))

    # Register these dimensions on this solver.
    cube.register_dimension('ntime', ntime,
                            description="Timesteps")
    cube.register_dimension('na', na,
                            description="Antenna")
    cube.register_dimension('nbands', nbands,
                            description="Bands")
    cube.register_dimension('nchan', nchan,
                            description="Channels")
    cube.register_dimension('npol', npol,
                            description="Polarisations")
    cube.register_dimension('nbl', nbl,
                            description="Baselines")

    # Register dependent dimensions
    cube.register_dimension('npolchan', nchan*npol,
                            description='Polarised channels')
    cube.register_dimension('nvis', ntime*nbl*nchan,
                            description='Visibilities')

    # Convert the source types, and their numbers
    # to their number variables and numbers
    # { 'point':10 } => { 'npsrc':10 }
    src_cfg = default_sources()
    src_nr_vars = sources_to_nr_vars(src_cfg)

    # Sum to get the total number of sources
    cube.register_dimension('nsrc', sum(src_nr_vars.itervalues()),
                            description="Sources (Total)")

    # Register the individual source types
    for nr_var, nr_of_src in src_nr_vars.iteritems():
        cube.register_dimension(nr_var, nr_of_src,
            description='{} sources'.format(mbs.SOURCE_DIM_TYPES[nr_var]))
python
[ "def", "register_default_dimensions", "(", "cube", ",", "slvr_cfg", ")", ":", "import", "montblanc", ".", "src_types", "as", "mbs", "# Pull out the configuration options for the basics", "autocor", "=", "slvr_cfg", "[", "'auto_correlations'", "]", "ntime", "=", "10", "na", "=", "7", "nbands", "=", "1", "nchan", "=", "16", "npol", "=", "4", "# Infer number of baselines from number of antenna,", "nbl", "=", "nr_of_baselines", "(", "na", ",", "autocor", ")", "if", "not", "npol", "==", "4", ":", "raise", "ValueError", "(", "\"npol set to {}, but only 4 polarisations \"", "\"are currently supported.\"", ")", "# Register these dimensions on this solver.", "cube", ".", "register_dimension", "(", "'ntime'", ",", "ntime", ",", "description", "=", "\"Timesteps\"", ")", "cube", ".", "register_dimension", "(", "'na'", ",", "na", ",", "description", "=", "\"Antenna\"", ")", "cube", ".", "register_dimension", "(", "'nbands'", ",", "nbands", ",", "description", "=", "\"Bands\"", ")", "cube", ".", "register_dimension", "(", "'nchan'", ",", "nchan", ",", "description", "=", "\"Channels\"", ")", "cube", ".", "register_dimension", "(", "'npol'", ",", "npol", ",", "description", "=", "\"Polarisations\"", ")", "cube", ".", "register_dimension", "(", "'nbl'", ",", "nbl", ",", "description", "=", "\"Baselines\"", ")", "# Register dependent dimensions", "cube", ".", "register_dimension", "(", "'npolchan'", ",", "nchan", "*", "npol", ",", "description", "=", "'Polarised channels'", ")", "cube", ".", "register_dimension", "(", "'nvis'", ",", "ntime", "*", "nbl", "*", "nchan", ",", "description", "=", "'Visibilities'", ")", "# Convert the source types, and their numbers", "# to their number variables and numbers", "# { 'point':10 } => { 'npsrc':10 }", "src_cfg", "=", "default_sources", "(", ")", "src_nr_vars", "=", "sources_to_nr_vars", "(", "src_cfg", ")", "# Sum to get the total number of sources", "cube", ".", "register_dimension", "(", "'nsrc'", ",", "sum", "(", "src_nr_vars", ".", "itervalues", "(", ")", ")", ",", "description", "=", "\"Sources (Total)\"", ")", "# Register the individual source types", "for", "nr_var", ",", "nr_of_src", "in", "src_nr_vars", ".", "iteritems", "(", ")", ":", "cube", ".", "register_dimension", "(", "nr_var", ",", "nr_of_src", ",", "description", "=", "'{} sources'", ".", "format", "(", "mbs", ".", "SOURCE_DIM_TYPES", "[", "nr_var", "]", ")", ")" ]
Register the default dimensions for a RIME solver
[ "Register", "the", "default", "dimensions", "for", "a", "RIME", "solver" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L429-L482
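nr_of_baselines is not part of this record; for a standard interferometer it presumably counts antenna pairs, along the lines of this hypothetical sketch (an assumed reconstruction, not montblanc's own code):

def nr_of_baselines(na, auto_correlations=False):
    # na*(na-1)/2 cross-correlation pairs, plus na auto-correlations
    # if those are requested
    return na * (na - 1) // 2 + (na if auto_correlations else 0)

# With the defaults above, na=7 yields nbl=21 (28 with auto-correlations).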
ska-sa/montblanc
montblanc/impl/rime/tensorflow/helpers/cluster_gen.py
get_ip_address
def get_ip_address(ifname):
    """ Hack to get IP address from the interface """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])
python
[ "def", "get_ip_address", "(", "ifname", ")", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "return", "socket", ".", "inet_ntoa", "(", "fcntl", ".", "ioctl", "(", "s", ".", "fileno", "(", ")", ",", "0x8915", ",", "# SIOCGIFADDR", "struct", ".", "pack", "(", "'256s'", ",", "ifname", "[", ":", "15", "]", ")", ")", "[", "20", ":", "24", "]", ")" ]
Hack to get IP address from the interface
[ "Hack", "to", "get", "IP", "address", "from", "the", "interface" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/helpers/cluster_gen.py#L26-L34
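The SIOCGIFADDR ioctl is Linux-specific, so this helper will not work on Windows or macOS. A minimal usage sketch (the interface name and resulting address are illustrative):

import socket
import fcntl
import struct

ip = get_ip_address('eth0')  # e.g. '192.168.1.42'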
ska-sa/montblanc
install/cuda.py
nvcc_compiler_settings
def nvcc_compiler_settings():
    """ Find nvcc and the CUDA installation """

    search_paths = os.environ.get('PATH', '').split(os.pathsep)
    nvcc_path = find_in_path('nvcc', search_paths)
    # Default CUDA location, anchored at the filesystem root
    default_cuda_path = os.path.join(os.sep, 'usr', 'local', 'cuda')
    cuda_path = os.environ.get('CUDA_PATH', default_cuda_path)

    nvcc_found = os.path.exists(nvcc_path)
    cuda_path_found = os.path.exists(cuda_path)

    # Can't find either NVCC or some CUDA_PATH
    if not nvcc_found and not cuda_path_found:
        raise InspectCudaException("Neither nvcc '{}' "
            "nor the CUDA_PATH '{}' were found!".format(
                nvcc_path, cuda_path))

    # No NVCC, try find it in the CUDA_PATH
    if not nvcc_found:
        log.warn("nvcc compiler not found at '{}'. "
            "Searching within the CUDA_PATH '{}'"
                .format(nvcc_path, cuda_path))

        bin_dir = os.path.join(cuda_path, 'bin')
        nvcc_path = find_in_path('nvcc', bin_dir)
        nvcc_found = os.path.exists(nvcc_path)

        if not nvcc_found:
            raise InspectCudaException("nvcc not found in '{}' "
                "or under the CUDA_PATH at '{}' "
                .format(search_paths, cuda_path))

    # No CUDA_PATH found, infer it from NVCC
    if not cuda_path_found:
        cuda_path = os.path.normpath(
            os.path.join(os.path.dirname(nvcc_path), ".."))

        log.warn("CUDA_PATH not found, inferring it as '{}' "
            "from the nvcc location '{}'".format(
                cuda_path, nvcc_path))

        cuda_path_found = True

    # Set up the compiler settings
    include_dirs = []
    library_dirs = []
    define_macros = []

    if cuda_path_found:
        include_dirs.append(os.path.join(cuda_path, 'include'))

        if sys.platform == 'win32':
            library_dirs.append(os.path.join(cuda_path, 'bin'))
            library_dirs.append(os.path.join(cuda_path, 'lib', 'x64'))
        else:
            library_dirs.append(os.path.join(cuda_path, 'lib64'))
            library_dirs.append(os.path.join(cuda_path, 'lib'))

    if sys.platform == 'darwin':
        library_dirs.append(os.path.join(default_cuda_path, 'lib'))

    return {
        'cuda_available': True,
        'nvcc_path': nvcc_path,
        'include_dirs': include_dirs,
        'library_dirs': library_dirs,
        'define_macros': define_macros,
        'libraries': ['cudart', 'cuda'],
        'language': 'c++',
    }
python
[ "def", "nvcc_compiler_settings", "(", ")", ":", "search_paths", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", "nvcc_path", "=", "find_in_path", "(", "'nvcc'", ",", "search_paths", ")", "default_cuda_path", "=", "os", ".", "path", ".", "join", "(", "'usr'", ",", "'local'", ",", "'cuda'", ")", "cuda_path", "=", "os", ".", "environ", ".", "get", "(", "'CUDA_PATH'", ",", "default_cuda_path", ")", "nvcc_found", "=", "os", ".", "path", ".", "exists", "(", "nvcc_path", ")", "cuda_path_found", "=", "os", ".", "path", ".", "exists", "(", "cuda_path", ")", "# Can't find either NVCC or some CUDA_PATH", "if", "not", "nvcc_found", "and", "not", "cuda_path_found", ":", "raise", "InspectCudaException", "(", "\"Neither nvcc '{}' \"", "\"or the CUDA_PATH '{}' were found!\"", ".", "format", "(", "nvcc_path", ",", "cuda_path", ")", ")", "# No NVCC, try find it in the CUDA_PATH", "if", "not", "nvcc_found", ":", "log", ".", "warn", "(", "\"nvcc compiler not found at '{}'. \"", "\"Searching within the CUDA_PATH '{}'\"", ".", "format", "(", "nvcc_path", ",", "cuda_path", ")", ")", "bin_dir", "=", "os", ".", "path", ".", "join", "(", "cuda_path", ",", "'bin'", ")", "nvcc_path", "=", "find_in_path", "(", "'nvcc'", ",", "bin_dir", ")", "nvcc_found", "=", "os", ".", "path", ".", "exists", "(", "nvcc_path", ")", "if", "not", "nvcc_found", ":", "raise", "InspectCudaException", "(", "\"nvcc not found in '{}' \"", "\"or under the CUDA_PATH at '{}' \"", ".", "format", "(", "search_paths", ",", "cuda_path", ")", ")", "# No CUDA_PATH found, infer it from NVCC", "if", "not", "cuda_path_found", ":", "cuda_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "nvcc_path", ")", ",", "\"..\"", ")", ")", "log", ".", "warn", "(", "\"CUDA_PATH not found, inferring it as '{}' \"", "\"from the nvcc location '{}'\"", ".", "format", "(", "cuda_path", ",", "nvcc_path", ")", ")", "cuda_path_found", "=", "True", "# Set up the compiler settings", "include_dirs", "=", "[", "]", "library_dirs", "=", "[", "]", "define_macros", "=", "[", "]", "if", "cuda_path_found", ":", "include_dirs", ".", "append", "(", "os", ".", "path", ".", "join", "(", "cuda_path", ",", "'include'", ")", ")", "if", "sys", ".", "platform", "==", "'win32'", ":", "library_dirs", ".", "append", "(", "os", ".", "path", ".", "join", "(", "cuda_path", ",", "'bin'", ")", ")", "library_dirs", ".", "append", "(", "os", ".", "path", ".", "join", "(", "cuda_path", ",", "'lib'", ",", "'x64'", ")", ")", "else", ":", "library_dirs", ".", "append", "(", "os", ".", "path", ".", "join", "(", "cuda_path", ",", "'lib64'", ")", ")", "library_dirs", ".", "append", "(", "os", ".", "path", ".", "join", "(", "cuda_path", ",", "'lib'", ")", ")", "if", "sys", ".", "platform", "==", "'darwin'", ":", "library_dirs", ".", "append", "(", "os", ".", "path", ".", "join", "(", "default_cuda_path", ",", "'lib'", ")", ")", "return", "{", "'cuda_available'", ":", "True", ",", "'nvcc_path'", ":", "nvcc_path", ",", "'include_dirs'", ":", "include_dirs", ",", "'library_dirs'", ":", "library_dirs", ",", "'define_macros'", ":", "define_macros", ",", "'libraries'", ":", "[", "'cudart'", ",", "'cuda'", "]", ",", "'language'", ":", "'c++'", ",", "}" ]
Find nvcc and the CUDA installation
[ "Find", "nvcc", "and", "the", "CUDA", "installation" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cuda.py#L48-L115
ska-sa/montblanc
install/cuda.py
inspect_cuda_version_and_devices
def inspect_cuda_version_and_devices(compiler, settings):
    """
    Poor man's deviceQuery. Returns CUDA_VERSION information
    and CUDA device information in JSON format
    """
    try:
        output = build_and_run(compiler, '''
            #include <cuda.h>
            #include <stdio.h>

            __device__ void test(int * in, int * out)
            {
                int tid = blockIdx.x*blockDim.x + threadIdx.x;
                out[tid] = in[tid];
            }

            int main(int argc, char* argv[]) {

                printf("{\\n");
                printf("  \\"cuda_version\\": %d,\\n", CUDA_VERSION);

                printf("  \\"devices\\": [\\n");

                int nr_of_devices = 0;
                cudaGetDeviceCount(&nr_of_devices);

                for(int d=0; d < nr_of_devices; ++d)
                {
                    cudaDeviceProp p;
                    cudaGetDeviceProperties(&p, d);

                    printf("    {\\n");

                    bool last = (d == nr_of_devices-1);

                    printf("      \\"name\\": \\"%s\\",\\n", p.name);
                    printf("      \\"major\\": %d,\\n", p.major);
                    printf("      \\"minor\\": %d,\\n", p.minor);
                    printf("      \\"memory\\": %lu\\n", p.totalGlobalMem);

                    printf("    }%s\\n", last ? "" : ",");
                }

                printf("  ]\\n");
                printf("}\\n");

                return 0;
            }
            ''',
            filename='test.cu',
            include_dirs=settings['include_dirs'],
            library_dirs=settings['library_dirs'],
            libraries=settings['libraries'])

    except Exception as e:
        msg = ("Running the CUDA device check "
               "stub failed\n{}".format(str(e)))
        # Python 2 three-expression raise: re-raise with the original traceback
        raise InspectCudaException(msg), None, sys.exc_info()[2]

    return output
python
[ "def", "inspect_cuda_version_and_devices", "(", "compiler", ",", "settings", ")", ":", "try", ":", "output", "=", "build_and_run", "(", "compiler", ",", "'''\n #include <cuda.h>\n #include <stdio.h>\n\n __device__ void test(int * in, int * out)\n {\n int tid = blockIdx.x*blockDim.x + threadIdx.x;\n out[tid] = in[tid];\n }\n\n int main(int argc, char* argv[]) {\n\n printf(\"{\\\\n\");\n printf(\" \\\\\"cuda_version\\\\\": %d,\\\\n\", CUDA_VERSION);\n\n printf(\" \\\\\"devices\\\\\": [\\\\n\");\n\n int nr_of_devices = 0;\n cudaGetDeviceCount(&nr_of_devices);\n\n for(int d=0; d < nr_of_devices; ++d)\n {\n cudaDeviceProp p;\n cudaGetDeviceProperties(&p, d);\n\n printf(\" {\\\\n\");\n\n bool last = (d == nr_of_devices-1);\n\n printf(\" \\\\\"name\\\\\": \\\\\"%s\\\\\",\\\\n\", p.name);\n printf(\" \\\\\"major\\\\\": %d,\\\\n\", p.major);\n printf(\" \\\\\"minor\\\\\": %d,\\\\n\", p.minor);\n printf(\" \\\\\"memory\\\\\": %lu\\\\n\", p.totalGlobalMem);\n\n printf(\" }%s\\\\n\", last ? \"\" : \",\");\n }\n\n printf(\" ]\\\\n\");\n printf(\"}\\\\n\");\n\n return 0;\n }\n '''", ",", "filename", "=", "'test.cu'", ",", "include_dirs", "=", "settings", "[", "'include_dirs'", "]", ",", "library_dirs", "=", "settings", "[", "'library_dirs'", "]", ",", "libraries", "=", "settings", "[", "'libraries'", "]", ")", "except", "Exception", "as", "e", ":", "msg", "=", "(", "\"Running the CUDA device check \"", "\"stub failed\\n{}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "raise", "InspectCudaException", "(", "msg", ")", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "return", "output" ]
Poor man's deviceQuery. Returns CUDA_VERSION information and CUDA device information in JSON format
[ "Poor", "mans", "deviceQuery", ".", "Returns", "CUDA_VERSION", "information", "and", "CUDA", "device", "information", "in", "JSON", "format" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cuda.py#L117-L176
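Parsed with json.loads, the stub's output has the following shape (the device values are illustrative, not real measurements):

example_output = {
    "cuda_version": 8000,            # CUDA 8.0
    "devices": [{
        "name": "GeForce GTX 1080",  # cudaDeviceProp.name
        "major": 6,                  # compute capability major/minor
        "minor": 1,
        "memory": 8506179584         # totalGlobalMem in bytes
    }]
}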
ska-sa/montblanc
install/cuda.py
customize_compiler_for_nvcc
def customize_compiler_for_nvcc(compiler, nvcc_settings):
    """inject deep into distutils to customize gcc/nvcc dispatch """

    # tell the compiler it can process .cu files
    compiler.src_extensions.append('.cu')

    # save references to the default compiler_so and _compile methods
    default_compiler_so = compiler.compiler_so
    default_compile = compiler._compile

    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Use NVCC for .cu files
        if os.path.splitext(src)[1] == '.cu':
            compiler.set_executable('compiler_so', nvcc_settings['nvcc_path'])

        default_compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

        # reset the default compiler_so, which we might have changed for cuda
        compiler.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    compiler._compile = _compile
python
[ "def", "customize_compiler_for_nvcc", "(", "compiler", ",", "nvcc_settings", ")", ":", "# tell the compiler it can process .cu files", "compiler", ".", "src_extensions", ".", "append", "(", "'.cu'", ")", "# save references to the default compiler_so and _compile methods", "default_compiler_so", "=", "compiler", ".", "compiler_so", "default_compile", "=", "compiler", ".", "_compile", "# now redefine the _compile method. This gets executed for each", "# object but distutils doesn't have the ability to change compilers", "# based on source extension: we add it.", "def", "_compile", "(", "obj", ",", "src", ",", "ext", ",", "cc_args", ",", "extra_postargs", ",", "pp_opts", ")", ":", "# Use NVCC for .cu files", "if", "os", ".", "path", ".", "splitext", "(", "src", ")", "[", "1", "]", "==", "'.cu'", ":", "compiler", ".", "set_executable", "(", "'compiler_so'", ",", "nvcc_settings", "[", "'nvcc_path'", "]", ")", "default_compile", "(", "obj", ",", "src", ",", "ext", ",", "cc_args", ",", "extra_postargs", ",", "pp_opts", ")", "# reset the default compiler_so, which we might have changed for cuda", "compiler", ".", "compiler_so", "=", "default_compiler_so", "# inject our redefined _compile method into the class", "compiler", ".", "_compile", "=", "_compile" ]
inject deep into distutils to customize gcc/nvcc dispatch
[ "inject", "deep", "into", "distutils", "to", "customize", "gcc", "/", "nvcc", "dispatch" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cuda.py#L215-L238
ska-sa/montblanc
install/cuda.py
inspect_cuda
def inspect_cuda():
    """ Return cuda device information and nvcc/cuda setup """
    nvcc_settings = nvcc_compiler_settings()
    sysconfig.get_config_vars()
    nvcc_compiler = ccompiler.new_compiler()
    sysconfig.customize_compiler(nvcc_compiler)
    customize_compiler_for_nvcc(nvcc_compiler, nvcc_settings)

    output = inspect_cuda_version_and_devices(nvcc_compiler, nvcc_settings)

    return json.loads(output), nvcc_settings
python
[ "def", "inspect_cuda", "(", ")", ":", "nvcc_settings", "=", "nvcc_compiler_settings", "(", ")", "sysconfig", ".", "get_config_vars", "(", ")", "nvcc_compiler", "=", "ccompiler", ".", "new_compiler", "(", ")", "sysconfig", ".", "customize_compiler", "(", "nvcc_compiler", ")", "customize_compiler_for_nvcc", "(", "nvcc_compiler", ",", "nvcc_settings", ")", "output", "=", "inspect_cuda_version_and_devices", "(", "nvcc_compiler", ",", "nvcc_settings", ")", "return", "json", ".", "loads", "(", "output", ")", ",", "nvcc_settings" ]
Return cuda device information and nvcc/cuda setup
[ "Return", "cuda", "device", "information", "and", "nvcc", "/", "cuda", "setup" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cuda.py#L241-L251
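Putting the three helpers together (a sketch; the printed values depend entirely on the local CUDA installation):

cuda_info, nvcc_settings = inspect_cuda()

print(cuda_info['cuda_version'])    # e.g. 8000
for dev in cuda_info['devices']:
    print(dev['name'], dev['major'], dev['minor'], dev['memory'])
print(nvcc_settings['nvcc_path'])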
ska-sa/montblanc
montblanc/solvers/rime_solver.py
RIMESolver.template_dict
def template_dict(self):
    """
    Returns a dictionary suitable for templating strings with
    properties and dimensions related to this Solver object.

    Used in templated GPU kernels.
    """
    slvr = self

    D = {
        # Constants
        'LIGHTSPEED': montblanc.constants.C,
    }

    # Map any types
    D.update(self.type_dict())

    # Update with dimensions
    D.update(self.dim_local_size_dict())

    # Add any registered properties to the dictionary
    for p in self._properties.itervalues():
        D[p.name] = getattr(self, p.name)

    return D
python
[ "def", "template_dict", "(", "self", ")", ":", "slvr", "=", "self", "D", "=", "{", "# Constants", "'LIGHTSPEED'", ":", "montblanc", ".", "constants", ".", "C", ",", "}", "# Map any types", "D", ".", "update", "(", "self", ".", "type_dict", "(", ")", ")", "# Update with dimensions", "D", ".", "update", "(", "self", ".", "dim_local_size_dict", "(", ")", ")", "# Add any registered properties to the dictionary", "for", "p", "in", "self", ".", "_properties", ".", "itervalues", "(", ")", ":", "D", "[", "p", ".", "name", "]", "=", "getattr", "(", "self", ",", "p", ".", "name", ")", "return", "D" ]
Returns a dictionary suitable for templating strings with properties and dimensions related to this Solver object. Used in templated GPU kernels.
[ "Returns", "a", "dictionary", "suitable", "for", "templating", "strings", "with", "properties", "and", "dimensions", "related", "to", "this", "Solver", "object", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/solvers/rime_solver.py#L84-L108
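A sketch of how such a dictionary is typically consumed when templating a kernel. The kernel text is illustrative, and it assumes a solver instance slvr whose dim_local_size_dict exposes an 'ntime' dimension, as registered above:

from string import Template

kernel_template = Template("""
#define NTIME ${ntime}
#define LIGHTSPEED ${LIGHTSPEED}
""")

kernel_source = kernel_template.substitute(**slvr.template_dict())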
ska-sa/montblanc
montblanc/factory.py
rime_solver
def rime_solver(slvr_cfg):
    """ Factory function that produces a RIME solver """
    from montblanc.impl.rime.tensorflow.RimeSolver import RimeSolver
    return RimeSolver(slvr_cfg)
python
[ "def", "rime_solver", "(", "slvr_cfg", ")", ":", "from", "montblanc", ".", "impl", ".", "rime", ".", "tensorflow", ".", "RimeSolver", "import", "RimeSolver", "return", "RimeSolver", "(", "slvr_cfg", ")" ]
Factory function that produces a RIME solver
[ "Factory", "function", "that", "produces", "a", "RIME", "solver" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/factory.py#L21-L24
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/source_provider.py
find_sources
def find_sources(obj, argspec=None):
    """
    Returns a dictionary of source methods found on this object,
    keyed on method name. Source methods are identified by argspec,
    a list of argument specifiers. So for e.g. an argspec of
    :code:`[['self', 'context'], ['s', 'c']]` would match methods
    looking like:

    .. code-block:: python

        def f(self, context):
            ...

    .. code-block:: python

        def f(s, c):
            ...

    but not

    .. code-block:: python

        def f(self, ctx):
            ...

    """
    if argspec is None:
        argspec = [DEFAULT_ARGSPEC]

    return {n: m for n, m in inspect.getmembers(obj, callable)
            if not n.startswith('_') and
            inspect.getargspec(m).args in argspec}
python
[ "def", "find_sources", "(", "obj", ",", "argspec", "=", "None", ")", ":", "if", "argspec", "is", "None", ":", "argspec", "=", "[", "DEFAULT_ARGSPEC", "]", "return", "{", "n", ":", "m", "for", "n", ",", "m", "in", "inspect", ".", "getmembers", "(", "obj", ",", "callable", ")", "if", "not", "n", ".", "startswith", "(", "'_'", ")", "and", "inspect", ".", "getargspec", "(", "m", ")", ".", "args", "in", "argspec", "}" ]
Returns a dictionary of source methods found on this object, keyed on method name. Source methods are identified by argspec, a list of argument specifiers. So for e.g. an argspec of :code:`[['self', 'context'], ['s', 'c']]` would match methods looking like: .. code-block:: python def f(self, context): ... .. code-block:: python def f(s, c): ... but not .. code-block:: python def f(self, ctx): ...
[ "Returns", "a", "dictionary", "of", "source", "methods", "found", "on", "this", "object", "keyed", "on", "method", "name", ".", "Source", "methods", "are", "identified", ".", "by", "argspec", "a", "list", "of", "argument", "specifiers", ".", "So", "for", "e", ".", "g", ".", "an", "argpsec", "of", ":", "code", ":", "[[", "self", "context", "]", "[", "s", "c", "]]", "would", "match", "methods", "looking", "like", ":" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/source_provider.py#L59-L92
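A minimal usage sketch. The provider class below is hypothetical, and DEFAULT_ARGSPEC is assumed to be ['self', 'context'], as the docstring suggests:

class DemoProvider(object):
    def point_lm(self, context):   # matches ['self', 'context']
        return 'lm data'

    def helper(self, value):       # different argument names: skipped
        return value

assert list(find_sources(DemoProvider()).keys()) == ['point_lm']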
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/source_provider.py
SourceProvider.sources
def sources(self):
    """
    Returns a dictionary of source methods found on this object,
    keyed on method name. Source methods are identified by
    (self, context) arguments on this object. For example:

    .. code-block:: python

        def f(self, context):
            ...

    is a source method, but

    .. code-block:: python

        def f(self, ctx):
            ...

    is not.
    """
    try:
        return self._sources
    except AttributeError:
        self._sources = find_sources(self)

    return self._sources
python
[ "def", "sources", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_sources", "except", "AttributeError", ":", "self", ".", "_sources", "=", "find_sources", "(", "self", ")", "return", "self", ".", "_sources" ]
Returns a dictionary of source methods found on this object, keyed on method name. Source methods are identified by (self, context) arguments on this object. For example: .. code-block:: python def f(self, context): ... is a source method, but .. code-block:: python def f(self, ctx): ... is not.
[ "Returns", "a", "dictionary", "of", "source", "methods", "found", "on", "this", "object", "keyed", "on", "method", "name", ".", "Source", "methods", "are", "identified", "by", "(", "self", "context", ")", "arguments", "on", "this", "object", ".", "For", "example", ":" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/source_provider.py#L113-L140
ska-sa/montblanc
montblanc/util/parallactic_angles.py
parallactic_angles
def parallactic_angles(times, antenna_positions, field_centre):
    """
    Computes parallactic angles per timestep for the given
    reference antenna position and field centre.

    Arguments:
        times: ndarray
            Array of unique times with shape (ntime,),
            obtained from TIME column of MS table
        antenna_positions: ndarray of shape (na, 3)
            Antenna positions, obtained from POSITION
            column of MS ANTENNA sub-table
        field_centre : ndarray of shape (2,)
            Field centre, should be obtained from MS PHASE_DIR

    Returns:
        An array of parallactic angles per time-step
    """
    import pyrap.quanta as pq

    try:
        # Create direction measure for the zenith
        zenith = pm.direction('AZEL', '0deg', '90deg')
    except AttributeError as e:
        if pm is None:
            raise ImportError("python-casacore import failed")

        raise

    # Create position measures for each antenna
    reference_positions = [pm.position('itrf',
                                       *(pq.quantity(x, 'm') for x in pos))
                           for pos in antenna_positions]

    # Compute field centre in radians
    fc_rad = pm.direction('J2000',
                          *(pq.quantity(f, 'rad') for f in field_centre))

    return np.asarray([
        # Set current time as the reference frame
        pm.do_frame(pm.epoch("UTC", pq.quantity(t, "s")))
        and
        [
            # Set antenna position as the reference frame
            pm.do_frame(rp)
            and
            pm.posangle(fc_rad, zenith).get_value("rad")
            for rp in reference_positions
        ]
        for t in times])
python
[ "def", "parallactic_angles", "(", "times", ",", "antenna_positions", ",", "field_centre", ")", ":", "import", "pyrap", ".", "quanta", "as", "pq", "try", ":", "# Create direction measure for the zenith", "zenith", "=", "pm", ".", "direction", "(", "'AZEL'", ",", "'0deg'", ",", "'90deg'", ")", "except", "AttributeError", "as", "e", ":", "if", "pm", "is", "None", ":", "raise", "ImportError", "(", "\"python-casacore import failed\"", ")", "raise", "# Create position measures for each antenna", "reference_positions", "=", "[", "pm", ".", "position", "(", "'itrf'", ",", "*", "(", "pq", ".", "quantity", "(", "x", ",", "'m'", ")", "for", "x", "in", "pos", ")", ")", "for", "pos", "in", "antenna_positions", "]", "# Compute field centre in radians", "fc_rad", "=", "pm", ".", "direction", "(", "'J2000'", ",", "*", "(", "pq", ".", "quantity", "(", "f", ",", "'rad'", ")", "for", "f", "in", "field_centre", ")", ")", "return", "np", ".", "asarray", "(", "[", "# Set current time as the reference frame", "pm", ".", "do_frame", "(", "pm", ".", "epoch", "(", "\"UTC\"", ",", "pq", ".", "quantity", "(", "t", ",", "\"s\"", ")", ")", ")", "and", "[", "# Set antenna position as the reference frame", "pm", ".", "do_frame", "(", "rp", ")", "and", "pm", ".", "posangle", "(", "fc_rad", ",", "zenith", ")", ".", "get_value", "(", "\"rad\"", ")", "for", "rp", "in", "reference_positions", "]", "for", "t", "in", "times", "]", ")" ]
Computes parallactic angles per timestep for the given reference antenna position and field centre. Arguments: times: ndarray Array of unique times with shape (ntime,), obtained from TIME column of MS table antenna_positions: ndarray of shape (na, 3) Antenna positions, obtained from POSITION column of MS ANTENNA sub-table field_centre : ndarray of shape (2,) Field centre, should be obtained from MS PHASE_DIR Returns: An array of parallactic angles per time-step
[ "Computes", "parallactic", "angles", "per", "timestep", "for", "the", "given", "reference", "antenna", "position", "and", "field", "centre", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/parallactic_angles.py#L34-L83
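Usage follows the docstring directly (requires python-casacore; the values below are illustrative):

import numpy as np

times = np.array([4.8e9, 4.8e9 + 8.0])    # MS TIME values, seconds
antenna_positions = np.array(              # ITRF positions, metres
    [[5109224.0, 2006790.0, -3239100.0]])
field_centre = np.array([0.5, -0.7])       # (ra, dec) in radians

pa = parallactic_angles(times, antenna_positions, field_centre)
print(pa.shape)                            # (ntime, na) == (2, 1)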
ska-sa/montblanc
montblanc/logsetup.py
setup_logging
def setup_logging():
    """ Setup logging configuration """

    # Console formatter, mention name
    cfmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s')

    # File formatter, mention time
    ffmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # Console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(cfmt)

    # File handler
    fh = logging.handlers.RotatingFileHandler('montblanc.log',
        maxBytes=10*1024*1024, backupCount=10)
    fh.setLevel(logging.INFO)
    fh.setFormatter(ffmt)

    # Create the logger,
    # adding the console and file handler
    mb_logger = logging.getLogger('montblanc')
    mb_logger.handlers = []
    mb_logger.setLevel(logging.DEBUG)
    mb_logger.addHandler(ch)
    mb_logger.addHandler(fh)

    # Set up the concurrent.futures logger
    cf_logger = logging.getLogger('concurrent.futures')
    cf_logger.setLevel(logging.DEBUG)
    cf_logger.addHandler(ch)
    cf_logger.addHandler(fh)

    return mb_logger
python
[ "def", "setup_logging", "(", ")", ":", "# Console formatter, mention name", "cfmt", "=", "logging", ".", "Formatter", "(", "(", "'%(name)s - %(levelname)s - %(message)s'", ")", ")", "# File formatter, mention time", "ffmt", "=", "logging", ".", "Formatter", "(", "(", "'%(asctime)s - %(levelname)s - %(message)s'", ")", ")", "# Console handler", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "ch", ".", "setLevel", "(", "logging", ".", "INFO", ")", "ch", ".", "setFormatter", "(", "cfmt", ")", "# File handler", "fh", "=", "logging", ".", "handlers", ".", "RotatingFileHandler", "(", "'montblanc.log'", ",", "maxBytes", "=", "10", "*", "1024", "*", "1024", ",", "backupCount", "=", "10", ")", "fh", ".", "setLevel", "(", "logging", ".", "INFO", ")", "fh", ".", "setFormatter", "(", "ffmt", ")", "# Create the logger,", "# adding the console and file handler", "mb_logger", "=", "logging", ".", "getLogger", "(", "'montblanc'", ")", "mb_logger", ".", "handlers", "=", "[", "]", "mb_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "mb_logger", ".", "addHandler", "(", "ch", ")", "mb_logger", ".", "addHandler", "(", "fh", ")", "# Set up the concurrent.futures logger", "cf_logger", "=", "logging", ".", "getLogger", "(", "'concurrent.futures'", ")", "cf_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "cf_logger", ".", "addHandler", "(", "ch", ")", "cf_logger", ".", "addHandler", "(", "fh", ")", "return", "mb_logger" ]
Setup logging configuration
[ "Setup", "logging", "configuration" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/logsetup.py#L24-L58
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/defaults_source_provider.py
constant_cache
def constant_cache(method):
    """
    Caches constant arrays associated with an array name.

    The intent of this decorator is to avoid the cost
    of recreating and storing many arrays of constant data,
    especially data created by np.zeros or np.ones.
    Instead, a single array of the first given shape is created
    and any further requests for constant data of the same
    (or smaller) shape are served from the cache.

    Requests for larger shapes or different types are regarded
    as a cache miss and will result in replacement of the
    existing cache value.
    """
    @functools.wraps(method)
    def wrapper(self, context):
        # Defer to method if no caching is enabled
        if not self._is_cached:
            return method(self, context)

        name = context.name
        cached = self._constant_cache.get(name, None)

        # No cached value, call method and return
        if cached is None:
            data = self._constant_cache[name] = method(self, context)
            return data

        # Can we just slice the existing cache entry?
        # 1. Are all context.shape's entries less than or equal
        #    to the shape of the cached data?
        # 2. Do they have the same dtype?
        cached_ok = (cached.dtype == context.dtype and
                     all(l <= r for l, r in zip(context.shape, cached.shape)))

        # Need to return something bigger or a different type
        if not cached_ok:
            data = self._constant_cache[name] = method(self, context)
            return data

        # Otherwise slice the cached data
        return cached[tuple(slice(0, s) for s in context.shape)]

    f = wrapper
    f.__decorator__ = constant_cache.__name__

    return f
python
[ "def", "constant_cache", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapper", "(", "self", ",", "context", ")", ":", "# Defer to method if no caching is enabled", "if", "not", "self", ".", "_is_cached", ":", "return", "method", "(", "self", ",", "context", ")", "name", "=", "context", ".", "name", "cached", "=", "self", ".", "_constant_cache", ".", "get", "(", "name", ",", "None", ")", "# No cached value, call method and return", "if", "cached", "is", "None", ":", "data", "=", "self", ".", "_constant_cache", "[", "name", "]", "=", "method", "(", "self", ",", "context", ")", "return", "data", "# Can we just slice the existing cache entry?", "# 1. Are all context.shape's entries less than or equal", "# to the shape of the cached data?", "# 2. Do they have the same dtype?", "cached_ok", "=", "(", "cached", ".", "dtype", "==", "context", ".", "dtype", "and", "all", "(", "l", "<=", "r", "for", "l", ",", "r", "in", "zip", "(", "context", ".", "shape", ",", "cached", ".", "shape", ")", ")", ")", "# Need to return something bigger or a different type", "if", "not", "cached_ok", ":", "data", "=", "self", ".", "_constant_cache", "[", "name", "]", "=", "method", "(", "self", ",", "context", ")", "return", "data", "# Otherwise slice the cached data", "return", "cached", "[", "tuple", "(", "slice", "(", "0", ",", "s", ")", "for", "s", "in", "context", ".", "shape", ")", "]", "f", "=", "wrapper", "f", ".", "__decorator__", "=", "constant_cache", ".", "__name__", "return", "f" ]
Caches constant arrays associated with an array name. The intent of this decorator is to avoid the cost of recreating and storing many arrays of constant data, especially data created by np.zeros or np.ones. Instead, a single array of the first given shape is created and any further requests for constant data of the same (or smaller) shape are served from the cache. Requests for larger shapes or different types are regarded as a cache miss and will result in replacement of the existing cache value.
[ "Caches", "constant", "arrays", "associated", "with", "an", "array", "name", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/defaults_source_provider.py#L30-L77
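A minimal sketch of the decorator in use. The provider and context classes below are hypothetical stand-ins that supply only the attributes the wrapper reads:

import numpy as np

class FakeContext(object):
    def __init__(self, name, shape, dtype):
        self.name, self.shape, self.dtype = name, shape, dtype

class FakeProvider(object):
    _is_cached = True
    _constant_cache = {}

    @constant_cache
    def ones(self, context):
        return np.ones(context.shape, context.dtype)

prov = FakeProvider()
a = prov.ones(FakeContext('ones', (10, 4), np.float64))  # miss: allocates
b = prov.ones(FakeContext('ones', (5, 4), np.float64))   # hit: sliced view
assert b.shape == (5, 4) and b.base is a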
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/defaults_source_provider.py
chunk_cache
def chunk_cache(method):
    """
    Caches chunks of default data.

    This decorator caches generated default data so as to
    avoid recomputing it on subsequent queries to the provider.
    """
    @functools.wraps(method)
    def wrapper(self, context):
        # Defer to the method if no caching is enabled
        if not self._is_cached:
            return method(self, context)

        # Construct the key for the given index
        name = context.name
        idx = context.array_extents(name)
        key = tuple(i for t in idx for i in t)

        # Access the sub-cache for this array
        array_cache = self._chunk_cache[name]

        # Cache miss, call the function
        if key not in array_cache:
            array_cache[key] = method(self, context)

        return array_cache[key]

    f = wrapper
    f.__decorator__ = chunk_cache.__name__

    return f
python
[ "def", "chunk_cache", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapper", "(", "self", ",", "context", ")", ":", "# Defer to the method if no caching is enabled", "if", "not", "self", ".", "_is_cached", ":", "return", "method", "(", "self", ",", "context", ")", "# Construct the key for the given index", "name", "=", "context", ".", "name", "idx", "=", "context", ".", "array_extents", "(", "name", ")", "key", "=", "tuple", "(", "i", "for", "t", "in", "idx", "for", "i", "in", "t", ")", "# Access the sub-cache for this array", "array_cache", "=", "self", ".", "_chunk_cache", "[", "name", "]", "# Cache miss, call the function", "if", "key", "not", "in", "array_cache", ":", "array_cache", "[", "key", "]", "=", "method", "(", "self", ",", "context", ")", "return", "array_cache", "[", "key", "]", "f", "=", "wrapper", "f", ".", "__decorator__", "=", "chunk_cache", ".", "__name__", "return", "f" ]
Caches chunks of default data. This decorator caches generated default data so as to avoid recomputing it on subsequent queries to the provider.
[ "Caches", "chunks", "of", "default", "data", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/defaults_source_provider.py#L79-L109
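The cache key is simply the flattened per-dimension extents; for example (the extents below are illustrative):

# array_extents might report [(0, 10), (0, 21)] for an (ntime, nbl) chunk;
# flattening produces one hashable tuple entry per bound.
idx = [(0, 10), (0, 21)]
key = tuple(i for t in idx for i in t)
assert key == (0, 10, 0, 21)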
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_create_defaults_source_provider
def _create_defaults_source_provider(cube, data_source):
    """
    Create a DefaultsSourceProvider object. This provides default
    data sources for each array defined on the hypercube. The data sources
    may either be obtained from the array's 'default' data source
    or the 'test' data source.
    """
    from montblanc.impl.rime.tensorflow.sources import (
        find_sources, DEFAULT_ARGSPEC)
    from montblanc.impl.rime.tensorflow.sources import constant_cache

    # Obtain default data sources for each array,
    # Just take from defaults if test data isn't specified
    staging_area_data_source = ('default' if not data_source == 'test'
        else data_source)

    cache = True

    default_prov = DefaultsSourceProvider(cache=cache)

    # Create data sources on the source provider from
    # the cube array data sources
    for n, a in cube.arrays().iteritems():
        # Unnecessary for temporary arrays
        if 'temporary' in a.tags:
            continue

        # Obtain the data source
        data_source = a.get(staging_area_data_source)

        # Array marked as constant, decorate the data source
        # with a constant caching decorator
        if cache is True and 'constant' in a.tags:
            data_source = constant_cache(data_source)

        method = types.MethodType(data_source, default_prov)
        setattr(default_prov, n, method)

    def _sources(self):
        """
        Override the sources method to also handle lambdas that look like
        lambda s, c: ..., as defined in the config module
        """
        try:
            return self._sources
        except AttributeError:
            self._sources = find_sources(self, [DEFAULT_ARGSPEC] + [['s', 'c']])

        return self._sources

    # Monkey patch the sources method
    default_prov.sources = types.MethodType(_sources, default_prov)

    return default_prov
python
[ "def", "_create_defaults_source_provider", "(", "cube", ",", "data_source", ")", ":", "from", "montblanc", ".", "impl", ".", "rime", ".", "tensorflow", ".", "sources", "import", "(", "find_sources", ",", "DEFAULT_ARGSPEC", ")", "from", "montblanc", ".", "impl", ".", "rime", ".", "tensorflow", ".", "sources", "import", "constant_cache", "# Obtain default data sources for each array,", "# Just take from defaults if test data isn't specified", "staging_area_data_source", "=", "(", "'default'", "if", "not", "data_source", "==", "'test'", "else", "data_source", ")", "cache", "=", "True", "default_prov", "=", "DefaultsSourceProvider", "(", "cache", "=", "cache", ")", "# Create data sources on the source provider from", "# the cube array data sources", "for", "n", ",", "a", "in", "cube", ".", "arrays", "(", ")", ".", "iteritems", "(", ")", ":", "# Unnecessary for temporary arrays", "if", "'temporary'", "in", "a", ".", "tags", ":", "continue", "# Obtain the data source", "data_source", "=", "a", ".", "get", "(", "staging_area_data_source", ")", "# Array marked as constant, decorate the data source", "# with a constant caching decorator", "if", "cache", "is", "True", "and", "'constant'", "in", "a", ".", "tags", ":", "data_source", "=", "constant_cache", "(", "data_source", ")", "method", "=", "types", ".", "MethodType", "(", "data_source", ",", "default_prov", ")", "setattr", "(", "default_prov", ",", "n", ",", "method", ")", "def", "_sources", "(", "self", ")", ":", "\"\"\"\n Override the sources method to also handle lambdas that look like\n lambda s, c: ..., as defined in the config module\n \"\"\"", "try", ":", "return", "self", ".", "_sources", "except", "AttributeError", ":", "self", ".", "_sources", "=", "find_sources", "(", "self", ",", "[", "DEFAULT_ARGSPEC", "]", "+", "[", "[", "'s'", ",", "'c'", "]", "]", ")", "return", "self", ".", "_sources", "# Monkey patch the sources method", "default_prov", ".", "sources", "=", "types", ".", "MethodType", "(", "_sources", ",", "default_prov", ")", "return", "default_prov" ]
Create a DefaultsSourceProvider object. This provides default data sources for each array defined on the hypercube. The data sources may either be obtained from the array's 'default' data source or the 'test' data source.
[ "Create", "a", "DefaultsSourceProvider", "object", ".", "This", "provides", "default", "data", "sources", "for", "each", "array", "defined", "on", "the", "hypercube", ".", "The", "data", "sources", "may", "either", "by", "obtained", "from", "the", "arrays", "default", "data", "source", "or", "the", "test", "data", "source", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L758-L812
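The provider construction above hinges on binding plain functions to an instance at runtime via types.MethodType. A minimal sketch of that mechanism, using a hypothetical Provider class and array name rather than Montblanc's actual provider API:

import types

class Provider(object):
    pass

def uvw_source(self, context):
    # 'self' is the provider instance once the function is bound
    return "uvw data for %s" % context

prov = Provider()
# Bind the free function as a method named after the array
setattr(prov, 'uvw', types.MethodType(uvw_source, prov))
print(prov.uvw('chunk 0'))  # -> uvw data for chunk 0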
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_construct_tensorflow_expression
def _construct_tensorflow_expression(slvr_cfg, feed_data, device, shard): """ Constructs a tensorflow expression for computing the RIME """ zero = tf.constant(0) src_count = zero src_ph_vars = feed_data.src_ph_vars LSA = feed_data.local polarisation_type = slvr_cfg['polarisation_type'] # Pull RIME inputs out of the feed staging_area # of the relevant shard, adding the feed once # inputs to the dictionary D = LSA.feed_many[shard].get_to_attrdict() D.update({k: fo.var for k, fo in LSA.feed_once.iteritems()}) with tf.device(device): # Infer chunk dimensions model_vis_shape = tf.shape(D.model_vis) ntime, nbl, nchan, npol = [model_vis_shape[i] for i in range(4)] # Infer float and complex type FT, CT = D.uvw.dtype, D.model_vis.dtype # Compute sine and cosine of parallactic angles # for the beam beam_sin, beam_cos = rime.parallactic_angle_sin_cos( D.parallactic_angles) # Compute sine and cosine of feed rotation angle feed_sin, feed_cos = rime.parallactic_angle_sin_cos( D.parallactic_angles[:, :] + D.feed_angles[None, :]) # Compute feed rotation feed_rotation = rime.feed_rotation(feed_sin, feed_cos, CT=CT, feed_type=polarisation_type) def antenna_jones(lm, stokes, alpha, ref_freq): """ Compute the jones terms for each antenna. lm, stokes and alpha are the source variables. """ # Compute the complex phase cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT) # Check for nans/infs in the complex phase phase_msg = ("Check that '1 - l**2 - m**2 >= 0' holds " "for all your lm coordinates. This is required " "for 'n = sqrt(1 - l**2 - m**2) - 1' " "to be finite.") phase_real = tf.check_numerics(tf.real(cplx_phase), phase_msg) phase_imag = tf.check_numerics(tf.imag(cplx_phase), phase_msg) # Compute the square root of the brightness matrix # (as well as the sign) bsqrt, sgn_brightness = rime.b_sqrt(stokes, alpha, D.frequency, ref_freq, CT=CT, polarisation_type=polarisation_type) # Check for nans/infs in the bsqrt bsqrt_msg = ("Check that your stokes parameters " "satisfy I**2 >= Q**2 + U**2 + V**2. " "Montblanc performs a cholesky decomposition " "of the brightness matrix and the above must " "hold for this to produce valid values.") bsqrt_real = tf.check_numerics(tf.real(bsqrt), bsqrt_msg) bsqrt_imag = tf.check_numerics(tf.imag(bsqrt), bsqrt_msg) # Compute the direction dependent effects from the beam ejones = rime.e_beam(lm, D.frequency, D.pointing_errors, D.antenna_scaling, beam_sin, beam_cos, D.beam_extents, D.beam_freq_map, D.ebeam) deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag] deps = [] # Do nothing for now # Combine the brightness square root, complex phase, # feed rotation and beam dde's with tf.control_dependencies(deps): antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase, feed_rotation, ejones, FT=FT) return antenna_jones, sgn_brightness # While loop condition for each point source type def point_cond(coherencies, npsrc, src_count): return tf.less(npsrc, src_ph_vars.npsrc) def gaussian_cond(coherencies, ngsrc, src_count): return tf.less(ngsrc, src_ph_vars.ngsrc) def sersic_cond(coherencies, nssrc, src_count): return tf.less(nssrc, src_ph_vars.nssrc) # While loop bodies def point_body(coherencies, npsrc, src_count): """ Accumulate visibilities for point source batch """ S = LSA.sources['npsrc'][shard].get_to_attrdict() # Maintain source counts nsrc = tf.shape(S.point_lm)[0] src_count += nsrc npsrc += nsrc ant_jones, sgn_brightness = antenna_jones(S.point_lm, S.point_stokes, S.point_alpha, S.point_ref_freq) shape = tf.ones(shape=[nsrc,ntime,nbl,nchan], dtype=FT) coherencies = rime.sum_coherencies(D.antenna1, D.antenna2, shape, ant_jones, sgn_brightness, coherencies) return coherencies, npsrc, src_count def gaussian_body(coherencies, ngsrc, src_count): """ Accumulate coherencies for gaussian source batch """ S = LSA.sources['ngsrc'][shard].get_to_attrdict() # Maintain source counts nsrc = tf.shape(S.gaussian_lm)[0] src_count += nsrc ngsrc += nsrc ant_jones, sgn_brightness = antenna_jones(S.gaussian_lm, S.gaussian_stokes, S.gaussian_alpha, S.gaussian_ref_freq) gauss_shape = rime.gauss_shape(D.uvw, D.antenna1, D.antenna2, D.frequency, S.gaussian_shape) coherencies = rime.sum_coherencies(D.antenna1, D.antenna2, gauss_shape, ant_jones, sgn_brightness, coherencies) return coherencies, ngsrc, src_count def sersic_body(coherencies, nssrc, src_count): """ Accumulate coherencies for sersic source batch """ S = LSA.sources['nssrc'][shard].get_to_attrdict() # Maintain source counts nsrc = tf.shape(S.sersic_lm)[0] src_count += nsrc nssrc += nsrc ant_jones, sgn_brightness = antenna_jones(S.sersic_lm, S.sersic_stokes, S.sersic_alpha, S.sersic_ref_freq) sersic_shape = rime.sersic_shape(D.uvw, D.antenna1, D.antenna2, D.frequency, S.sersic_shape) coherencies = rime.sum_coherencies(D.antenna1, D.antenna2, sersic_shape, ant_jones, sgn_brightness, coherencies) return coherencies, nssrc, src_count with tf.device(device): base_coherencies = tf.zeros(shape=[ntime,nbl,nchan,npol], dtype=CT) # Evaluate point sources summed_coherencies, npsrc, src_count = tf.while_loop( point_cond, point_body, [base_coherencies, zero, src_count]) # Evaluate gaussians summed_coherencies, ngsrc, src_count = tf.while_loop( gaussian_cond, gaussian_body, [summed_coherencies, zero, src_count]) # Evaluate sersics summed_coherencies, nssrc, src_count = tf.while_loop( sersic_cond, sersic_body, [summed_coherencies, zero, src_count]) # Post process visibilities to produce model visibilities and chi squared model_vis, chi_squared = rime.post_process_visibilities( D.antenna1, D.antenna2, D.direction_independent_effects, D.flag, D.weight, D.model_vis, summed_coherencies, D.observed_vis) # Create staging_area put operation put_op = LSA.output.put_from_list([D.descriptor, model_vis, chi_squared]) # Return descriptor and staging_area put operation return D.descriptor, put_op
python
def _construct_tensorflow_expression(slvr_cfg, feed_data, device, shard): """ Constructs a tensorflow expression for computing the RIME """ zero = tf.constant(0) src_count = zero src_ph_vars = feed_data.src_ph_vars LSA = feed_data.local polarisation_type = slvr_cfg['polarisation_type'] # Pull RIME inputs out of the feed staging_area # of the relevant shard, adding the feed once # inputs to the dictionary D = LSA.feed_many[shard].get_to_attrdict() D.update({k: fo.var for k, fo in LSA.feed_once.iteritems()}) with tf.device(device): # Infer chunk dimensions model_vis_shape = tf.shape(D.model_vis) ntime, nbl, nchan, npol = [model_vis_shape[i] for i in range(4)] # Infer float and complex type FT, CT = D.uvw.dtype, D.model_vis.dtype # Compute sine and cosine of parallactic angles # for the beam beam_sin, beam_cos = rime.parallactic_angle_sin_cos( D.parallactic_angles) # Compute sine and cosine of feed rotation angle feed_sin, feed_cos = rime.parallactic_angle_sin_cos( D.parallactic_angles[:, :] + D.feed_angles[None, :]) # Compute feed rotation feed_rotation = rime.feed_rotation(feed_sin, feed_cos, CT=CT, feed_type=polarisation_type) def antenna_jones(lm, stokes, alpha, ref_freq): """ Compute the jones terms for each antenna. lm, stokes and alpha are the source variables. """ # Compute the complex phase cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT) # Check for nans/infs in the complex phase phase_msg = ("Check that '1 - l**2 - m**2 >= 0' holds " "for all your lm coordinates. This is required " "for 'n = sqrt(1 - l**2 - m**2) - 1' " "to be finite.") phase_real = tf.check_numerics(tf.real(cplx_phase), phase_msg) phase_imag = tf.check_numerics(tf.imag(cplx_phase), phase_msg) # Compute the square root of the brightness matrix # (as well as the sign) bsqrt, sgn_brightness = rime.b_sqrt(stokes, alpha, D.frequency, ref_freq, CT=CT, polarisation_type=polarisation_type) # Check for nans/infs in the bsqrt bsqrt_msg = ("Check that your stokes parameters " "satisfy I**2 >= Q**2 + U**2 + V**2. " "Montblanc performs a cholesky decomposition " "of the brightness matrix and the above must " "hold for this to produce valid values.") bsqrt_real = tf.check_numerics(tf.real(bsqrt), bsqrt_msg) bsqrt_imag = tf.check_numerics(tf.imag(bsqrt), bsqrt_msg) # Compute the direction dependent effects from the beam ejones = rime.e_beam(lm, D.frequency, D.pointing_errors, D.antenna_scaling, beam_sin, beam_cos, D.beam_extents, D.beam_freq_map, D.ebeam) deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag] deps = [] # Do nothing for now # Combine the brightness square root, complex phase, # feed rotation and beam dde's with tf.control_dependencies(deps): antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase, feed_rotation, ejones, FT=FT) return antenna_jones, sgn_brightness # While loop condition for each point source type def point_cond(coherencies, npsrc, src_count): return tf.less(npsrc, src_ph_vars.npsrc) def gaussian_cond(coherencies, ngsrc, src_count): return tf.less(ngsrc, src_ph_vars.ngsrc) def sersic_cond(coherencies, nssrc, src_count): return tf.less(nssrc, src_ph_vars.nssrc) # While loop bodies def point_body(coherencies, npsrc, src_count): """ Accumulate visibilities for point source batch """ S = LSA.sources['npsrc'][shard].get_to_attrdict() # Maintain source counts nsrc = tf.shape(S.point_lm)[0] src_count += nsrc npsrc += nsrc ant_jones, sgn_brightness = antenna_jones(S.point_lm, S.point_stokes, S.point_alpha, S.point_ref_freq) shape = tf.ones(shape=[nsrc,ntime,nbl,nchan], dtype=FT) coherencies = rime.sum_coherencies(D.antenna1, D.antenna2, shape, ant_jones, sgn_brightness, coherencies) return coherencies, npsrc, src_count def gaussian_body(coherencies, ngsrc, src_count): """ Accumulate coherencies for gaussian source batch """ S = LSA.sources['ngsrc'][shard].get_to_attrdict() # Maintain source counts nsrc = tf.shape(S.gaussian_lm)[0] src_count += nsrc ngsrc += nsrc ant_jones, sgn_brightness = antenna_jones(S.gaussian_lm, S.gaussian_stokes, S.gaussian_alpha, S.gaussian_ref_freq) gauss_shape = rime.gauss_shape(D.uvw, D.antenna1, D.antenna2, D.frequency, S.gaussian_shape) coherencies = rime.sum_coherencies(D.antenna1, D.antenna2, gauss_shape, ant_jones, sgn_brightness, coherencies) return coherencies, ngsrc, src_count def sersic_body(coherencies, nssrc, src_count): """ Accumulate coherencies for sersic source batch """ S = LSA.sources['nssrc'][shard].get_to_attrdict() # Maintain source counts nsrc = tf.shape(S.sersic_lm)[0] src_count += nsrc nssrc += nsrc ant_jones, sgn_brightness = antenna_jones(S.sersic_lm, S.sersic_stokes, S.sersic_alpha, S.sersic_ref_freq) sersic_shape = rime.sersic_shape(D.uvw, D.antenna1, D.antenna2, D.frequency, S.sersic_shape) coherencies = rime.sum_coherencies(D.antenna1, D.antenna2, sersic_shape, ant_jones, sgn_brightness, coherencies) return coherencies, nssrc, src_count with tf.device(device): base_coherencies = tf.zeros(shape=[ntime,nbl,nchan,npol], dtype=CT) # Evaluate point sources summed_coherencies, npsrc, src_count = tf.while_loop( point_cond, point_body, [base_coherencies, zero, src_count]) # Evaluate gaussians summed_coherencies, ngsrc, src_count = tf.while_loop( gaussian_cond, gaussian_body, [summed_coherencies, zero, src_count]) # Evaluate sersics summed_coherencies, nssrc, src_count = tf.while_loop( sersic_cond, sersic_body, [summed_coherencies, zero, src_count]) # Post process visibilities to produce model visibilities and chi squared model_vis, chi_squared = rime.post_process_visibilities( D.antenna1, D.antenna2, D.direction_independent_effects, D.flag, D.weight, D.model_vis, summed_coherencies, D.observed_vis) # Create staging_area put operation put_op = LSA.output.put_from_list([D.descriptor, model_vis, chi_squared]) # Return descriptor and staging_area put operation return D.descriptor, put_op
[ "def", "_construct_tensorflow_expression", "(", "slvr_cfg", ",", "feed_data", ",", "device", ",", "shard", ")", ":", "zero", "=", "tf", ".", "constant", "(", "0", ")", "src_count", "=", "zero", "src_ph_vars", "=", "feed_data", ".", "src_ph_vars", "LSA", "=", "feed_data", ".", "local", "polarisation_type", "=", "slvr_cfg", "[", "'polarisation_type'", "]", "# Pull RIME inputs out of the feed staging_area", "# of the relevant shard, adding the feed once", "# inputs to the dictionary", "D", "=", "LSA", ".", "feed_many", "[", "shard", "]", ".", "get_to_attrdict", "(", ")", "D", ".", "update", "(", "{", "k", ":", "fo", ".", "var", "for", "k", ",", "fo", "in", "LSA", ".", "feed_once", ".", "iteritems", "(", ")", "}", ")", "with", "tf", ".", "device", "(", "device", ")", ":", "# Infer chunk dimensions", "model_vis_shape", "=", "tf", ".", "shape", "(", "D", ".", "model_vis", ")", "ntime", ",", "nbl", ",", "nchan", ",", "npol", "=", "[", "model_vis_shape", "[", "i", "]", "for", "i", "in", "range", "(", "4", ")", "]", "# Infer float and complex type", "FT", ",", "CT", "=", "D", ".", "uvw", ".", "dtype", ",", "D", ".", "model_vis", ".", "dtype", "# Compute sine and cosine of parallactic angles", "# for the beam", "beam_sin", ",", "beam_cos", "=", "rime", ".", "parallactic_angle_sin_cos", "(", "D", ".", "parallactic_angles", ")", "# Compute sine and cosine of feed rotation angle", "feed_sin", ",", "feed_cos", "=", "rime", ".", "parallactic_angle_sin_cos", "(", "D", ".", "parallactic_angles", "[", ":", ",", ":", "]", "+", "D", ".", "feed_angles", "[", "None", ",", ":", "]", ")", "# Compute feed rotation", "feed_rotation", "=", "rime", ".", "feed_rotation", "(", "feed_sin", ",", "feed_cos", ",", "CT", "=", "CT", ",", "feed_type", "=", "polarisation_type", ")", "def", "antenna_jones", "(", "lm", ",", "stokes", ",", "alpha", ",", "ref_freq", ")", ":", "\"\"\"\n Compute the jones terms for each antenna.\n\n lm, stokes and alpha are the source variables.\n \"\"\"", "# Compute the complex phase", "cplx_phase", "=", "rime", ".", "phase", "(", "lm", ",", "D", ".", "uvw", ",", "D", ".", "frequency", ",", "CT", "=", "CT", ")", "# Check for nans/infs in the complex phase", "phase_msg", "=", "(", "\"Check that '1 - l**2 - m**2 >= 0' holds \"", "\"for all your lm coordinates. This is required \"", "\"for 'n = sqrt(1 - l**2 - m**2) - 1' \"", "\"to be finite.\"", ")", "phase_real", "=", "tf", ".", "check_numerics", "(", "tf", ".", "real", "(", "cplx_phase", ")", ",", "phase_msg", ")", "phase_imag", "=", "tf", ".", "check_numerics", "(", "tf", ".", "imag", "(", "cplx_phase", ")", ",", "phase_msg", ")", "# Compute the square root of the brightness matrix", "# (as well as the sign)", "bsqrt", ",", "sgn_brightness", "=", "rime", ".", "b_sqrt", "(", "stokes", ",", "alpha", ",", "D", ".", "frequency", ",", "ref_freq", ",", "CT", "=", "CT", ",", "polarisation_type", "=", "polarisation_type", ")", "# Check for nans/infs in the bsqrt", "bsqrt_msg", "=", "(", "\"Check that your stokes parameters \"", "\"satisfy I**2 >= Q**2 + U**2 + V**2. 
\"", "\"Montblanc performs a cholesky decomposition \"", "\"of the brightness matrix and the above must \"", "\"hold for this to produce valid values.\"", ")", "bsqrt_real", "=", "tf", ".", "check_numerics", "(", "tf", ".", "real", "(", "bsqrt", ")", ",", "bsqrt_msg", ")", "bsqrt_imag", "=", "tf", ".", "check_numerics", "(", "tf", ".", "imag", "(", "bsqrt", ")", ",", "bsqrt_msg", ")", "# Compute the direction dependent effects from the beam", "ejones", "=", "rime", ".", "e_beam", "(", "lm", ",", "D", ".", "frequency", ",", "D", ".", "pointing_errors", ",", "D", ".", "antenna_scaling", ",", "beam_sin", ",", "beam_cos", ",", "D", ".", "beam_extents", ",", "D", ".", "beam_freq_map", ",", "D", ".", "ebeam", ")", "deps", "=", "[", "phase_real", ",", "phase_imag", ",", "bsqrt_real", ",", "bsqrt_imag", "]", "deps", "=", "[", "]", "# Do nothing for now", "# Combine the brightness square root, complex phase,", "# feed rotation and beam dde's", "with", "tf", ".", "control_dependencies", "(", "deps", ")", ":", "antenna_jones", "=", "rime", ".", "create_antenna_jones", "(", "bsqrt", ",", "cplx_phase", ",", "feed_rotation", ",", "ejones", ",", "FT", "=", "FT", ")", "return", "antenna_jones", ",", "sgn_brightness", "# While loop condition for each point source type", "def", "point_cond", "(", "coherencies", ",", "npsrc", ",", "src_count", ")", ":", "return", "tf", ".", "less", "(", "npsrc", ",", "src_ph_vars", ".", "npsrc", ")", "def", "gaussian_cond", "(", "coherencies", ",", "ngsrc", ",", "src_count", ")", ":", "return", "tf", ".", "less", "(", "ngsrc", ",", "src_ph_vars", ".", "ngsrc", ")", "def", "sersic_cond", "(", "coherencies", ",", "nssrc", ",", "src_count", ")", ":", "return", "tf", ".", "less", "(", "nssrc", ",", "src_ph_vars", ".", "nssrc", ")", "# While loop bodies", "def", "point_body", "(", "coherencies", ",", "npsrc", ",", "src_count", ")", ":", "\"\"\" Accumulate visiblities for point source batch \"\"\"", "S", "=", "LSA", ".", "sources", "[", "'npsrc'", "]", "[", "shard", "]", ".", "get_to_attrdict", "(", ")", "# Maintain source counts", "nsrc", "=", "tf", ".", "shape", "(", "S", ".", "point_lm", ")", "[", "0", "]", "src_count", "+=", "nsrc", "npsrc", "+=", "nsrc", "ant_jones", ",", "sgn_brightness", "=", "antenna_jones", "(", "S", ".", "point_lm", ",", "S", ".", "point_stokes", ",", "S", ".", "point_alpha", ",", "S", ".", "point_ref_freq", ")", "shape", "=", "tf", ".", "ones", "(", "shape", "=", "[", "nsrc", ",", "ntime", ",", "nbl", ",", "nchan", "]", ",", "dtype", "=", "FT", ")", "coherencies", "=", "rime", ".", "sum_coherencies", "(", "D", ".", "antenna1", ",", "D", ".", "antenna2", ",", "shape", ",", "ant_jones", ",", "sgn_brightness", ",", "coherencies", ")", "return", "coherencies", ",", "npsrc", ",", "src_count", "def", "gaussian_body", "(", "coherencies", ",", "ngsrc", ",", "src_count", ")", ":", "\"\"\" Accumulate coherencies for gaussian source batch \"\"\"", "S", "=", "LSA", ".", "sources", "[", "'ngsrc'", "]", "[", "shard", "]", ".", "get_to_attrdict", "(", ")", "# Maintain source counts", "nsrc", "=", "tf", ".", "shape", "(", "S", ".", "gaussian_lm", ")", "[", "0", "]", "src_count", "+=", "nsrc", "ngsrc", "+=", "nsrc", "ant_jones", ",", "sgn_brightness", "=", "antenna_jones", "(", "S", ".", "gaussian_lm", ",", "S", ".", "gaussian_stokes", ",", "S", ".", "gaussian_alpha", ",", "S", ".", "gaussian_ref_freq", ")", "gauss_shape", "=", "rime", ".", "gauss_shape", "(", "D", ".", "uvw", ",", "D", ".", "antenna1", ",", "D", ".", "antenna2", ",", "D", ".", "frequency", ",", "S", 
".", "gaussian_shape", ")", "coherencies", "=", "rime", ".", "sum_coherencies", "(", "D", ".", "antenna1", ",", "D", ".", "antenna2", ",", "gauss_shape", ",", "ant_jones", ",", "sgn_brightness", ",", "coherencies", ")", "return", "coherencies", ",", "ngsrc", ",", "src_count", "def", "sersic_body", "(", "coherencies", ",", "nssrc", ",", "src_count", ")", ":", "\"\"\" Accumulate coherencies for sersic source batch \"\"\"", "S", "=", "LSA", ".", "sources", "[", "'nssrc'", "]", "[", "shard", "]", ".", "get_to_attrdict", "(", ")", "# Maintain source counts", "nsrc", "=", "tf", ".", "shape", "(", "S", ".", "sersic_lm", ")", "[", "0", "]", "src_count", "+=", "nsrc", "nssrc", "+=", "nsrc", "ant_jones", ",", "sgn_brightness", "=", "antenna_jones", "(", "S", ".", "sersic_lm", ",", "S", ".", "sersic_stokes", ",", "S", ".", "sersic_alpha", ",", "S", ".", "sersic_ref_freq", ")", "sersic_shape", "=", "rime", ".", "sersic_shape", "(", "D", ".", "uvw", ",", "D", ".", "antenna1", ",", "D", ".", "antenna2", ",", "D", ".", "frequency", ",", "S", ".", "sersic_shape", ")", "coherencies", "=", "rime", ".", "sum_coherencies", "(", "D", ".", "antenna1", ",", "D", ".", "antenna2", ",", "sersic_shape", ",", "ant_jones", ",", "sgn_brightness", ",", "coherencies", ")", "return", "coherencies", ",", "nssrc", ",", "src_count", "with", "tf", ".", "device", "(", "device", ")", ":", "base_coherencies", "=", "tf", ".", "zeros", "(", "shape", "=", "[", "ntime", ",", "nbl", ",", "nchan", ",", "npol", "]", ",", "dtype", "=", "CT", ")", "# Evaluate point sources", "summed_coherencies", ",", "npsrc", ",", "src_count", "=", "tf", ".", "while_loop", "(", "point_cond", ",", "point_body", ",", "[", "base_coherencies", ",", "zero", ",", "src_count", "]", ")", "# Evaluate gaussians", "summed_coherencies", ",", "ngsrc", ",", "src_count", "=", "tf", ".", "while_loop", "(", "gaussian_cond", ",", "gaussian_body", ",", "[", "summed_coherencies", ",", "zero", ",", "src_count", "]", ")", "# Evaluate sersics", "summed_coherencies", ",", "nssrc", ",", "src_count", "=", "tf", ".", "while_loop", "(", "sersic_cond", ",", "sersic_body", ",", "[", "summed_coherencies", ",", "zero", ",", "src_count", "]", ")", "# Post process visibilities to produce model visibilites and chi squared", "model_vis", ",", "chi_squared", "=", "rime", ".", "post_process_visibilities", "(", "D", ".", "antenna1", ",", "D", ".", "antenna2", ",", "D", ".", "direction_independent_effects", ",", "D", ".", "flag", ",", "D", ".", "weight", ",", "D", ".", "model_vis", ",", "summed_coherencies", ",", "D", ".", "observed_vis", ")", "# Create enstaging_area operation", "put_op", "=", "LSA", ".", "output", ".", "put_from_list", "(", "[", "D", ".", "descriptor", ",", "model_vis", ",", "chi_squared", "]", ")", "# Return descriptor and enstaging_area operation", "return", "D", ".", "descriptor", ",", "put_op" ]
Constructs a tensorflow expression for computing the RIME
[ "Constructs", "a", "tensorflow", "expression", "for", "computing", "the", "RIME" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L924-L1104
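_construct_tensorflow_expression accumulates per-source-type coherencies with graph-mode tf.while_loop calls that run until the dequeued source count reaches the placeholder total. A stripped-down sketch of that accumulation pattern under the TF 1.x API the module targets (the total and chunk size here are arbitrary assumptions):

import tensorflow as tf

total = tf.constant(10)   # stand-in for src_ph_vars.npsrc
chunk = tf.constant(3)    # sources consumed per iteration

def cond(acc, count):
    return tf.less(count, total)

def body(acc, count):
    # Clamp the final chunk, then fold its contribution into the accumulator
    n = tf.minimum(chunk, total - count)
    return acc + tf.cast(n, tf.float32), count + n

acc, count = tf.while_loop(cond, body, [tf.constant(0.0), tf.constant(0)])

with tf.Session() as sess:
    print(sess.run([acc, count]))  # [10.0, 10]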
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_get_data
def _get_data(data_source, context): """ Get data from the data source, checking the return values """ try: # Get data from the data source data = data_source.source(context) # Complain about None values if data is None: raise ValueError("'None' returned from " "data source '{n}'".format(n=context.name)) # We want numpy arrays elif not isinstance(data, np.ndarray): raise TypeError("Data source '{n}' did not " "return a numpy array, returned a '{t}'".format( n=context.name, t=type(data))) # And they should be the right shape and type elif data.shape != context.shape or data.dtype != context.dtype: raise ValueError("Expected data of shape '{esh}' and " "dtype '{edt}' for data source '{n}', but " "shape '{rsh}' and '{rdt}' were found instead".format( n=context.name, esh=context.shape, edt=context.dtype, rsh=data.shape, rdt=data.dtype)) return data except Exception as e: ex = ValueError("An exception occurred while " "obtaining data from data source '{ds}'\n\n" "{e}\n\n" "{help}".format(ds=context.name, e=str(e), help=context.help())) raise ex, None, sys.exc_info()[2]
python
def _get_data(data_source, context): """ Get data from the data source, checking the return values """ try: # Get data from the data source data = data_source.source(context) # Complain about None values if data is None: raise ValueError("'None' returned from " "data source '{n}'".format(n=context.name)) # We want numpy arrays elif not isinstance(data, np.ndarray): raise TypeError("Data source '{n}' did not " "return a numpy array, returned a '{t}'".format( n=context.name, t=type(data))) # And they should be the right shape and type elif data.shape != context.shape or data.dtype != context.dtype: raise ValueError("Expected data of shape '{esh}' and " "dtype '{edt}' for data source '{n}', but " "shape '{rsh}' and '{rdt}' were found instead".format( n=context.name, esh=context.shape, edt=context.dtype, rsh=data.shape, rdt=data.dtype)) return data except Exception as e: ex = ValueError("An exception occurred while " "obtaining data from data source '{ds}'\n\n" "{e}\n\n" "{help}".format(ds=context.name, e=str(e), help=context.help())) raise ex, None, sys.exc_info()[2]
[ "def", "_get_data", "(", "data_source", ",", "context", ")", ":", "try", ":", "# Get data from the data source", "data", "=", "data_source", ".", "source", "(", "context", ")", "# Complain about None values", "if", "data", "is", "None", ":", "raise", "ValueError", "(", "\"'None' returned from \"", "\"data source '{n}'\"", ".", "format", "(", "n", "=", "context", ".", "name", ")", ")", "# We want numpy arrays", "elif", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"Data source '{n}' did not \"", "\"return a numpy array, returned a '{t}'\"", ".", "format", "(", "t", "=", "type", "(", "data", ")", ")", ")", "# And they should be the right shape and type", "elif", "data", ".", "shape", "!=", "context", ".", "shape", "or", "data", ".", "dtype", "!=", "context", ".", "dtype", ":", "raise", "ValueError", "(", "\"Expected data of shape '{esh}' and \"", "\"dtype '{edt}' for data source '{n}', but \"", "\"shape '{rsh}' and '{rdt}' was found instead\"", ".", "format", "(", "n", "=", "context", ".", "name", ",", "esh", "=", "context", ".", "shape", ",", "edt", "=", "context", ".", "dtype", ",", "rsh", "=", "data", ".", "shape", ",", "rdt", "=", "data", ".", "dtype", ")", ")", "return", "data", "except", "Exception", "as", "e", ":", "ex", "=", "ValueError", "(", "\"An exception occurred while \"", "\"obtaining data from data source '{ds}'\\n\\n\"", "\"{e}\\n\\n\"", "\"{help}\"", ".", "format", "(", "ds", "=", "context", ".", "name", ",", "e", "=", "str", "(", "e", ")", ",", "help", "=", "context", ".", "help", "(", ")", ")", ")", "raise", "ex", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]" ]
Get data from the data source, checking the return values
[ "Get", "data", "from", "the", "data", "source", "checking", "the", "return", "values" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L1106-L1138
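_get_data's checks are easiest to see against a toy context and data source; both classes below are hypothetical stand-ins exposing only the attributes the function reads (name, shape, dtype, help, source):

import numpy as np

class FakeContext(object):
    name, shape, dtype = 'uvw', (10, 3), np.float64
    def help(self):
        return "expected shape %s, dtype %s" % (self.shape, self.dtype)

class FakeSource(object):
    def source(self, context):
        # Shape and dtype match the context, so validation passes
        return np.zeros(context.shape, context.dtype)

data = _get_data(FakeSource(), FakeContext())
assert data.shape == (10, 3)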
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_supply_data
def _supply_data(data_sink, context): """ Supply data to the data sink """ try: data_sink.sink(context) except Exception as e: ex = ValueError("An exception occurred while " "supplying data to data sink '{ds}'\n\n" "{e}\n\n" "{help}".format(ds=context.name, e=str(e), help=context.help())) raise ex, None, sys.exc_info()[2]
python
def _supply_data(data_sink, context): """ Supply data to the data sink """ try: data_sink.sink(context) except Exception as e: ex = ValueError("An exception occurred while " "supplying data to data sink '{ds}'\n\n" "{e}\n\n" "{help}".format(ds=context.name, e=str(e), help=context.help())) raise ex, None, sys.exc_info()[2]
[ "def", "_supply_data", "(", "data_sink", ",", "context", ")", ":", "try", ":", "data_sink", ".", "sink", "(", "context", ")", "except", "Exception", "as", "e", ":", "ex", "=", "ValueError", "(", "\"An exception occurred while \"", "\"supplying data to data sink '{ds}'\\n\\n\"", "\"{e}\\n\\n\"", "\"{help}\"", ".", "format", "(", "ds", "=", "context", ".", "name", ",", "e", "=", "str", "(", "e", ")", ",", "help", "=", "context", ".", "help", "(", ")", ")", ")", "raise", "ex", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]" ]
Supply data to the data sink
[ "Supply", "data", "to", "the", "data", "sink" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L1140-L1151
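The raise ex, None, sys.exc_info()[2] form used in _get_data and _supply_data is the Python 2 three-argument raise, which substitutes a new exception while preserving the original traceback (Python 3 would use raise ex.with_traceback(...) instead). The pattern in isolation, with a made-up failing sink:

import sys

def sink(context):
    return {}['missing']   # raises KeyError

try:
    sink(None)
except Exception as e:
    ex = ValueError("An exception occurred in the sink: %s" % str(e))
    # Re-raise the wrapper, keeping the traceback of the original failure
    raise ex, None, sys.exc_info()[2]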
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_apply_source_provider_dim_updates
def _apply_source_provider_dim_updates(cube, source_providers, budget_dims): """ Given a list of source_providers, apply the list of suggested dimension updates given in provider.updated_dimensions() to the supplied hypercube. Dimension global_sizes are always updated with the supplied sizes and lower_extent is always set to 0. upper_extent is set to any reductions (current upper_extents) existing in budget_dims, otherwise it is set to global_size. """ # Create a mapping between a dimension and a # list of (global_size, provider_name) tuples update_map = collections.defaultdict(list) for prov in source_providers: for dim_tuple in prov.updated_dimensions(): name, size = dim_tuple # Don't accept any updates on the nsrc dimension # This is managed internally if name == 'nsrc': continue dim_update = DimensionUpdate(size, prov.name()) update_map[name].append(dim_update) # No dimensions were updated, quit early if len(update_map) == 0: return cube.bytes_required() # Ensure that the global sizes we receive # for each dimension are unique. Tell the user # when conflicts occur update_list = [] for name, updates in update_map.iteritems(): if not all(updates[0].size == du.size for du in updates[1:]): raise ValueError("Received conflicting " "global size updates '{u}'" " for dimension '{n}'.".format(n=name, u=updates)) update_list.append((name, updates[0].size)) montblanc.log.info("Updating dimensions {} from " "source providers.".format(str(update_list))) # Now update our dimensions for name, global_size in update_list: # Defer to existing any existing budgeted extent sizes # Otherwise take the global_size extent_size = budget_dims.get(name, global_size) # Take the global_size if extent_size was previously zero! extent_size = global_size if extent_size == 0 else extent_size # Clamp extent size to global size if extent_size > global_size: extent_size = global_size # Update the dimension cube.update_dimension(name, global_size=global_size, lower_extent=0, upper_extent=extent_size) # Handle global number of sources differently # It's equal to the number of # point's, gaussian's, sersic's combined nsrc = sum(cube.dim_global_size(*mbu.source_nr_vars())) # Extent size will be equal to whatever source type # we're currently iterating over. So just take # the maximum extent size given the sources es = max(cube.dim_extent_size(*mbu.source_nr_vars())) cube.update_dimension('nsrc', global_size=nsrc, lower_extent=0, upper_extent=es) # Return our cube size return cube.bytes_required()
python
def _apply_source_provider_dim_updates(cube, source_providers, budget_dims): """ Given a list of source_providers, apply the list of suggested dimension updates given in provider.updated_dimensions() to the supplied hypercube. Dimension global_sizes are always updated with the supplied sizes and lower_extent is always set to 0. upper_extent is set to any reductions (current upper_extents) existing in budget_dims, otherwise it is set to global_size. """ # Create a mapping between a dimension and a # list of (global_size, provider_name) tuples update_map = collections.defaultdict(list) for prov in source_providers: for dim_tuple in prov.updated_dimensions(): name, size = dim_tuple # Don't accept any updates on the nsrc dimension # This is managed internally if name == 'nsrc': continue dim_update = DimensionUpdate(size, prov.name()) update_map[name].append(dim_update) # No dimensions were updated, quit early if len(update_map) == 0: return cube.bytes_required() # Ensure that the global sizes we receive # for each dimension are unique. Tell the user # when conflicts occur update_list = [] for name, updates in update_map.iteritems(): if not all(updates[0].size == du.size for du in updates[1:]): raise ValueError("Received conflicting " "global size updates '{u}'" " for dimension '{n}'.".format(n=name, u=updates)) update_list.append((name, updates[0].size)) montblanc.log.info("Updating dimensions {} from " "source providers.".format(str(update_list))) # Now update our dimensions for name, global_size in update_list: # Defer to existing any existing budgeted extent sizes # Otherwise take the global_size extent_size = budget_dims.get(name, global_size) # Take the global_size if extent_size was previously zero! extent_size = global_size if extent_size == 0 else extent_size # Clamp extent size to global size if extent_size > global_size: extent_size = global_size # Update the dimension cube.update_dimension(name, global_size=global_size, lower_extent=0, upper_extent=extent_size) # Handle global number of sources differently # It's equal to the number of # point's, gaussian's, sersic's combined nsrc = sum(cube.dim_global_size(*mbu.source_nr_vars())) # Extent size will be equal to whatever source type # we're currently iterating over. So just take # the maximum extent size given the sources es = max(cube.dim_extent_size(*mbu.source_nr_vars())) cube.update_dimension('nsrc', global_size=nsrc, lower_extent=0, upper_extent=es) # Return our cube size return cube.bytes_required()
[ "def", "_apply_source_provider_dim_updates", "(", "cube", ",", "source_providers", ",", "budget_dims", ")", ":", "# Create a mapping between a dimension and a", "# list of (global_size, provider_name) tuples", "update_map", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "prov", "in", "source_providers", ":", "for", "dim_tuple", "in", "prov", ".", "updated_dimensions", "(", ")", ":", "name", ",", "size", "=", "dim_tuple", "# Don't accept any updates on the nsrc dimension", "# This is managed internally", "if", "name", "==", "'nsrc'", ":", "continue", "dim_update", "=", "DimensionUpdate", "(", "size", ",", "prov", ".", "name", "(", ")", ")", "update_map", "[", "name", "]", ".", "append", "(", "dim_update", ")", "# No dimensions were updated, quit early", "if", "len", "(", "update_map", ")", "==", "0", ":", "return", "cube", ".", "bytes_required", "(", ")", "# Ensure that the global sizes we receive", "# for each dimension are unique. Tell the user", "# when conflicts occur", "update_list", "=", "[", "]", "for", "name", ",", "updates", "in", "update_map", ".", "iteritems", "(", ")", ":", "if", "not", "all", "(", "updates", "[", "0", "]", ".", "size", "==", "du", ".", "size", "for", "du", "in", "updates", "[", "1", ":", "]", ")", ":", "raise", "ValueError", "(", "\"Received conflicting \"", "\"global size updates '{u}'\"", "\" for dimension '{n}'.\"", ".", "format", "(", "n", "=", "name", ",", "u", "=", "updates", ")", ")", "update_list", ".", "append", "(", "(", "name", ",", "updates", "[", "0", "]", ".", "size", ")", ")", "montblanc", ".", "log", ".", "info", "(", "\"Updating dimensions {} from \"", "\"source providers.\"", ".", "format", "(", "str", "(", "update_list", ")", ")", ")", "# Now update our dimensions", "for", "name", ",", "global_size", "in", "update_list", ":", "# Defer to existing any existing budgeted extent sizes", "# Otherwise take the global_size", "extent_size", "=", "budget_dims", ".", "get", "(", "name", ",", "global_size", ")", "# Take the global_size if extent_size was previously zero!", "extent_size", "=", "global_size", "if", "extent_size", "==", "0", "else", "extent_size", "# Clamp extent size to global size", "if", "extent_size", ">", "global_size", ":", "extent_size", "=", "global_size", "# Update the dimension", "cube", ".", "update_dimension", "(", "name", ",", "global_size", "=", "global_size", ",", "lower_extent", "=", "0", ",", "upper_extent", "=", "extent_size", ")", "# Handle global number of sources differently", "# It's equal to the number of", "# point's, gaussian's, sersic's combined", "nsrc", "=", "sum", "(", "cube", ".", "dim_global_size", "(", "*", "mbu", ".", "source_nr_vars", "(", ")", ")", ")", "# Extent size will be equal to whatever source type", "# we're currently iterating over. So just take", "# the maximum extent size given the sources", "es", "=", "max", "(", "cube", ".", "dim_extent_size", "(", "*", "mbu", ".", "source_nr_vars", "(", ")", ")", ")", "cube", ".", "update_dimension", "(", "'nsrc'", ",", "global_size", "=", "nsrc", ",", "lower_extent", "=", "0", ",", "upper_extent", "=", "es", ")", "# Return our cube size", "return", "cube", ".", "bytes_required", "(", ")" ]
Given a list of source_providers, apply the list of suggested dimension updates given in provider.updated_dimensions() to the supplied hypercube. Dimension global_sizes are always updated with the supplied sizes and lower_extent is always set to 0. upper_extent is set to any reductions (current upper_extents) existing in budget_dims, otherwise it is set to global_size.
[ "Given", "a", "list", "of", "source_providers", "apply", "the", "list", "of", "suggested", "dimension", "updates", "given", "in", "provider", ".", "updated_dimensions", "()", "to", "the", "supplied", "hypercube", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L1234-L1317
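The conflict detection in _apply_source_provider_dim_updates amounts to grouping size suggestions by dimension name and requiring that they agree; a self-contained sketch with made-up provider updates:

import collections

updates = [('ntime', 100, 'ms_provider'),
           ('ntime', 100, 'fits_provider'),
           ('nchan', 64, 'ms_provider')]

update_map = collections.defaultdict(list)
for name, size, provider in updates:
    update_map[name].append((size, provider))

for name, sizes in update_map.items():
    if not all(s == sizes[0][0] for s, _ in sizes):
        raise ValueError("Received conflicting global size "
                         "updates %s for dimension '%s'" % (sizes, name))
    # The agreed size becomes the dimension's new global_size
    print(name, sizes[0][0])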
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_setup_hypercube
def _setup_hypercube(cube, slvr_cfg): """ Sets up the hypercube given a solver configuration """ mbu.register_default_dimensions(cube, slvr_cfg) # Configure the dimensions of the beam cube cube.register_dimension('beam_lw', 2, description='E Beam cube l width') cube.register_dimension('beam_mh', 2, description='E Beam cube m height') cube.register_dimension('beam_nud', 2, description='E Beam cube nu depth') # ========================================= # Register hypercube Arrays and Properties # ========================================= from montblanc.impl.rime.tensorflow.config import (A, P) def _massage_dtypes(A, T): def _massage_dtype_in_dict(D): new_dict = D.copy() new_dict['dtype'] = mbu.dtype_from_str(D['dtype'], T) return new_dict return [_massage_dtype_in_dict(D) for D in A] dtype = slvr_cfg['dtype'] is_f32 = dtype == 'float' T = { 'ft' : np.float32 if is_f32 else np.float64, 'ct' : np.complex64 if is_f32 else np.complex128, 'int' : int, } cube.register_properties(_massage_dtypes(P, T)) cube.register_arrays(_massage_dtypes(A, T))
python
def _setup_hypercube(cube, slvr_cfg): """ Sets up the hypercube given a solver configuration """ mbu.register_default_dimensions(cube, slvr_cfg) # Configure the dimensions of the beam cube cube.register_dimension('beam_lw', 2, description='E Beam cube l width') cube.register_dimension('beam_mh', 2, description='E Beam cube m height') cube.register_dimension('beam_nud', 2, description='E Beam cube nu depth') # ========================================= # Register hypercube Arrays and Properties # ========================================= from montblanc.impl.rime.tensorflow.config import (A, P) def _massage_dtypes(A, T): def _massage_dtype_in_dict(D): new_dict = D.copy() new_dict['dtype'] = mbu.dtype_from_str(D['dtype'], T) return new_dict return [_massage_dtype_in_dict(D) for D in A] dtype = slvr_cfg['dtype'] is_f32 = dtype == 'float' T = { 'ft' : np.float32 if is_f32 else np.float64, 'ct' : np.complex64 if is_f32 else np.complex128, 'int' : int, } cube.register_properties(_massage_dtypes(P, T)) cube.register_arrays(_massage_dtypes(A, T))
[ "def", "_setup_hypercube", "(", "cube", ",", "slvr_cfg", ")", ":", "mbu", ".", "register_default_dimensions", "(", "cube", ",", "slvr_cfg", ")", "# Configure the dimensions of the beam cube", "cube", ".", "register_dimension", "(", "'beam_lw'", ",", "2", ",", "description", "=", "'E Beam cube l width'", ")", "cube", ".", "register_dimension", "(", "'beam_mh'", ",", "2", ",", "description", "=", "'E Beam cube m height'", ")", "cube", ".", "register_dimension", "(", "'beam_nud'", ",", "2", ",", "description", "=", "'E Beam cube nu depth'", ")", "# =========================================", "# Register hypercube Arrays and Properties", "# =========================================", "from", "montblanc", ".", "impl", ".", "rime", ".", "tensorflow", ".", "config", "import", "(", "A", ",", "P", ")", "def", "_massage_dtypes", "(", "A", ",", "T", ")", ":", "def", "_massage_dtype_in_dict", "(", "D", ")", ":", "new_dict", "=", "D", ".", "copy", "(", ")", "new_dict", "[", "'dtype'", "]", "=", "mbu", ".", "dtype_from_str", "(", "D", "[", "'dtype'", "]", ",", "T", ")", "return", "new_dict", "return", "[", "_massage_dtype_in_dict", "(", "D", ")", "for", "D", "in", "A", "]", "dtype", "=", "slvr_cfg", "[", "'dtype'", "]", "is_f32", "=", "dtype", "==", "'float'", "T", "=", "{", "'ft'", ":", "np", ".", "float32", "if", "is_f32", "else", "np", ".", "float64", ",", "'ct'", ":", "np", ".", "complex64", "if", "is_f32", "else", "np", ".", "complex128", ",", "'int'", ":", "int", ",", "}", "cube", ".", "register_properties", "(", "_massage_dtypes", "(", "P", ",", "T", ")", ")", "cube", ".", "register_arrays", "(", "_massage_dtypes", "(", "A", ",", "T", ")", ")" ]
Sets up the hypercube given a solver configuration
[ "Sets", "up", "the", "hypercube", "given", "a", "solver", "configuration" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L1319-L1357
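_massage_dtypes simply rewrites the string dtype aliases ('ft', 'ct', 'int') in each array description to concrete numpy types; the same idea in miniature, ignoring the extra handling in mbu.dtype_from_str and using a made-up schema:

import numpy as np

A = [{'name': 'uvw', 'dtype': 'ft'},
     {'name': 'model_vis', 'dtype': 'ct'}]
T = {'ft': np.float32, 'ct': np.complex64, 'int': int}

massaged = [dict(d, dtype=T[d['dtype']]) for d in A]
print(massaged)  # dtype aliases replaced by np.float32 / np.complex64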
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
_partition
def _partition(iter_dims, data_sources): """ Partition data sources into 1. Dictionary of data sources associated with radio sources. 2. List of data sources to feed multiple times. 3. List of data sources to feed once. """ src_nr_vars = set(source_var_types().values()) iter_dims = set(iter_dims) src_data_sources = collections.defaultdict(list) feed_many = [] feed_once = [] for ds in data_sources: # Is this data source associated with # a radio source (point, gaussian, etc.?) src_int = src_nr_vars.intersection(ds.shape) if len(src_int) > 1: raise ValueError("Data source '{}' contains multiple " "source types '{}'".format(ds.name, src_int)) elif len(src_int) == 1: # Yep, record appropriately and iterate src_data_sources[src_int.pop()].append(ds) continue # Are we feeding this data source multiple times # (Does it possess dimensions on which we iterate?) if len(iter_dims.intersection(ds.shape)) > 0: feed_many.append(ds) continue # Assume this is a data source that we only feed once feed_once.append(ds) return src_data_sources, feed_many, feed_once
python
def _partition(iter_dims, data_sources): """ Partition data sources into 1. Dictionary of data sources associated with radio sources. 2. List of data sources to feed multiple times. 3. List of data sources to feed once. """ src_nr_vars = set(source_var_types().values()) iter_dims = set(iter_dims) src_data_sources = collections.defaultdict(list) feed_many = [] feed_once = [] for ds in data_sources: # Is this data source associated with # a radio source (point, gaussian, etc.?) src_int = src_nr_vars.intersection(ds.shape) if len(src_int) > 1: raise ValueError("Data source '{}' contains multiple " "source types '{}'".format(ds.name, src_int)) elif len(src_int) == 1: # Yep, record appropriately and iterate src_data_sources[src_int.pop()].append(ds) continue # Are we feeding this data source multiple times # (Does it possess dimensions on which we iterate?) if len(iter_dims.intersection(ds.shape)) > 0: feed_many.append(ds) continue # Assume this is a data source that we only feed once feed_once.append(ds) return src_data_sources, feed_many, feed_once
[ "def", "_partition", "(", "iter_dims", ",", "data_sources", ")", ":", "src_nr_vars", "=", "set", "(", "source_var_types", "(", ")", ".", "values", "(", ")", ")", "iter_dims", "=", "set", "(", "iter_dims", ")", "src_data_sources", "=", "collections", ".", "defaultdict", "(", "list", ")", "feed_many", "=", "[", "]", "feed_once", "=", "[", "]", "for", "ds", "in", "data_sources", ":", "# Is this data source associated with", "# a radio source (point, gaussian, etc.?)", "src_int", "=", "src_nr_vars", ".", "intersection", "(", "ds", ".", "shape", ")", "if", "len", "(", "src_int", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Data source '{}' contains multiple \"", "\"source types '{}'\"", ".", "format", "(", "ds", ".", "name", ",", "src_int", ")", ")", "elif", "len", "(", "src_int", ")", "==", "1", ":", "# Yep, record appropriately and iterate", "src_data_sources", "[", "src_int", ".", "pop", "(", ")", "]", ".", "append", "(", "ds", ")", "continue", "# Are we feeding this data source multiple times", "# (Does it possess dimensions on which we iterate?)", "if", "len", "(", "iter_dims", ".", "intersection", "(", "ds", ".", "shape", ")", ")", ">", "0", ":", "feed_many", ".", "append", "(", "ds", ")", "continue", "# Assume this is a data source that we only feed once", "feed_once", ".", "append", "(", "ds", ")", "return", "src_data_sources", ",", "feed_many", ",", "feed_once" ]
Partition data sources into 1. Dictionary of data sources associated with radio sources. 2. List of data sources to feed multiple times. 3. List of data sources to feed once.
[ "Partition", "data", "sources", "into" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L1359-L1397
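_partition classifies each data source purely by which dimension names occur in its shape; a toy run with namedtuple stand-ins for data sources shows the three buckets:

import collections

Source = collections.namedtuple('Source', ['name', 'shape'])

src_nr_vars = {'npsrc', 'ngsrc', 'nssrc'}   # radio source count dims
iter_dims = {'ntime', 'nbl'}                # dims we iterate over

sources = [Source('point_lm', ('npsrc', 2)),
           Source('uvw', ('ntime', 'na', 3)),
           Source('antenna_position', ('na', 3))]

src_ds, feed_many, feed_once = collections.defaultdict(list), [], []
for ds in sources:
    overlap = src_nr_vars.intersection(ds.shape)
    if overlap:
        src_ds[overlap.pop()].append(ds)    # tied to a radio source type
    elif iter_dims.intersection(ds.shape):
        feed_many.append(ds)                # re-fed on every iteration
    else:
        feed_once.append(ds)                # fed a single time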
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
RimeSolver._feed
def _feed(self, cube, data_sources, data_sinks, global_iter_args): """ Feed stub """ try: self._feed_impl(cube, data_sources, data_sinks, global_iter_args) except Exception as e: montblanc.log.exception("Feed Exception") raise
python
def _feed(self, cube, data_sources, data_sinks, global_iter_args): """ Feed stub """ try: self._feed_impl(cube, data_sources, data_sinks, global_iter_args) except Exception as e: montblanc.log.exception("Feed Exception") raise
[ "def", "_feed", "(", "self", ",", "cube", ",", "data_sources", ",", "data_sinks", ",", "global_iter_args", ")", ":", "try", ":", "self", ".", "_feed_impl", "(", "cube", ",", "data_sources", ",", "data_sinks", ",", "global_iter_args", ")", "except", "Exception", "as", "e", ":", "montblanc", ".", "log", ".", "exception", "(", "\"Feed Exception\"", ")", "raise" ]
Feed stub
[ "Feed", "stub" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L356-L362
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
RimeSolver._feed_impl
def _feed_impl(self, cube, data_sources, data_sinks, global_iter_args): """ Implementation of staging_area feeding """ session = self._tf_session FD = self._tf_feed_data LSA = FD.local # Get source strides out before the local sizes are modified during # the source loops below src_types = LSA.sources.keys() src_strides = [int(i) for i in cube.dim_extent_size(*src_types)] src_staging_areas = [[LSA.sources[t][s] for t in src_types] for s in range(self._nr_of_shards)] compute_feed_dict = { ph: cube.dim_global_size(n) for n, ph in FD.src_ph_vars.iteritems() } compute_feed_dict.update({ ph: getattr(cube, n) for n, ph in FD.property_ph_vars.iteritems() }) chunks_fed = 0 which_shard = itertools.cycle([self._shard(d,s) for s in range(self._shards_per_device) for d, dev in enumerate(self._devices)]) while True: try: # Get the descriptor describing a portion of the RIME result = session.run(LSA.descriptor.get_op) descriptor = result['descriptor'] except tf.errors.OutOfRangeError as e: montblanc.log.exception("Descriptor reading exception") # Quit if EOF if descriptor[0] == -1: break # Make it read-only so we can hash the contents descriptor.flags.writeable = False # Find indices of the emptiest staging_areas and, by implication # the shard with the least work assigned to it emptiest_staging_areas = np.argsort(self._inputs_waiting.get()) shard = emptiest_staging_areas[0] shard = which_shard.next() feed_f = self._feed_executors[shard].submit(self._feed_actual, data_sources.copy(), cube.copy(), descriptor, shard, src_types, src_strides, src_staging_areas[shard], global_iter_args) compute_f = self._compute_executors[shard].submit(self._compute, compute_feed_dict, shard) consume_f = self._consumer_executor.submit(self._consume, data_sinks.copy(), cube.copy(), global_iter_args) self._inputs_waiting.increment(shard) yield (feed_f, compute_f, consume_f) chunks_fed += 1 montblanc.log.info("Done feeding {n} chunks.".format(n=chunks_fed))
python
def _feed_impl(self, cube, data_sources, data_sinks, global_iter_args): """ Implementation of staging_area feeding """ session = self._tf_session FD = self._tf_feed_data LSA = FD.local # Get source strides out before the local sizes are modified during # the source loops below src_types = LSA.sources.keys() src_strides = [int(i) for i in cube.dim_extent_size(*src_types)] src_staging_areas = [[LSA.sources[t][s] for t in src_types] for s in range(self._nr_of_shards)] compute_feed_dict = { ph: cube.dim_global_size(n) for n, ph in FD.src_ph_vars.iteritems() } compute_feed_dict.update({ ph: getattr(cube, n) for n, ph in FD.property_ph_vars.iteritems() }) chunks_fed = 0 which_shard = itertools.cycle([self._shard(d,s) for s in range(self._shards_per_device) for d, dev in enumerate(self._devices)]) while True: try: # Get the descriptor describing a portion of the RIME result = session.run(LSA.descriptor.get_op) descriptor = result['descriptor'] except tf.errors.OutOfRangeError as e: montblanc.log.exception("Descriptor reading exception") # Quit if EOF if descriptor[0] == -1: break # Make it read-only so we can hash the contents descriptor.flags.writeable = False # Find indices of the emptiest staging_areas and, by implication # the shard with the least work assigned to it emptiest_staging_areas = np.argsort(self._inputs_waiting.get()) shard = emptiest_staging_areas[0] shard = which_shard.next() feed_f = self._feed_executors[shard].submit(self._feed_actual, data_sources.copy(), cube.copy(), descriptor, shard, src_types, src_strides, src_staging_areas[shard], global_iter_args) compute_f = self._compute_executors[shard].submit(self._compute, compute_feed_dict, shard) consume_f = self._consumer_executor.submit(self._consume, data_sinks.copy(), cube.copy(), global_iter_args) self._inputs_waiting.increment(shard) yield (feed_f, compute_f, consume_f) chunks_fed += 1 montblanc.log.info("Done feeding {n} chunks.".format(n=chunks_fed))
[ "def", "_feed_impl", "(", "self", ",", "cube", ",", "data_sources", ",", "data_sinks", ",", "global_iter_args", ")", ":", "session", "=", "self", ".", "_tf_session", "FD", "=", "self", ".", "_tf_feed_data", "LSA", "=", "FD", ".", "local", "# Get source strides out before the local sizes are modified during", "# the source loops below", "src_types", "=", "LSA", ".", "sources", ".", "keys", "(", ")", "src_strides", "=", "[", "int", "(", "i", ")", "for", "i", "in", "cube", ".", "dim_extent_size", "(", "*", "src_types", ")", "]", "src_staging_areas", "=", "[", "[", "LSA", ".", "sources", "[", "t", "]", "[", "s", "]", "for", "t", "in", "src_types", "]", "for", "s", "in", "range", "(", "self", ".", "_nr_of_shards", ")", "]", "compute_feed_dict", "=", "{", "ph", ":", "cube", ".", "dim_global_size", "(", "n", ")", "for", "n", ",", "ph", "in", "FD", ".", "src_ph_vars", ".", "iteritems", "(", ")", "}", "compute_feed_dict", ".", "update", "(", "{", "ph", ":", "getattr", "(", "cube", ",", "n", ")", "for", "n", ",", "ph", "in", "FD", ".", "property_ph_vars", ".", "iteritems", "(", ")", "}", ")", "chunks_fed", "=", "0", "which_shard", "=", "itertools", ".", "cycle", "(", "[", "self", ".", "_shard", "(", "d", ",", "s", ")", "for", "s", "in", "range", "(", "self", ".", "_shards_per_device", ")", "for", "d", ",", "dev", "in", "enumerate", "(", "self", ".", "_devices", ")", "]", ")", "while", "True", ":", "try", ":", "# Get the descriptor describing a portion of the RIME", "result", "=", "session", ".", "run", "(", "LSA", ".", "descriptor", ".", "get_op", ")", "descriptor", "=", "result", "[", "'descriptor'", "]", "except", "tf", ".", "errors", ".", "OutOfRangeError", "as", "e", ":", "montblanc", ".", "log", ".", "exception", "(", "\"Descriptor reading exception\"", ")", "# Quit if EOF", "if", "descriptor", "[", "0", "]", "==", "-", "1", ":", "break", "# Make it read-only so we can hash the contents", "descriptor", ".", "flags", ".", "writeable", "=", "False", "# Find indices of the emptiest staging_areas and, by implication", "# the shard with the least work assigned to it", "emptiest_staging_areas", "=", "np", ".", "argsort", "(", "self", ".", "_inputs_waiting", ".", "get", "(", ")", ")", "shard", "=", "emptiest_staging_areas", "[", "0", "]", "shard", "=", "which_shard", ".", "next", "(", ")", "feed_f", "=", "self", ".", "_feed_executors", "[", "shard", "]", ".", "submit", "(", "self", ".", "_feed_actual", ",", "data_sources", ".", "copy", "(", ")", ",", "cube", ".", "copy", "(", ")", ",", "descriptor", ",", "shard", ",", "src_types", ",", "src_strides", ",", "src_staging_areas", "[", "shard", "]", ",", "global_iter_args", ")", "compute_f", "=", "self", ".", "_compute_executors", "[", "shard", "]", ".", "submit", "(", "self", ".", "_compute", ",", "compute_feed_dict", ",", "shard", ")", "consume_f", "=", "self", ".", "_consumer_executor", ".", "submit", "(", "self", ".", "_consume", ",", "data_sinks", ".", "copy", "(", ")", ",", "cube", ".", "copy", "(", ")", ",", "global_iter_args", ")", "self", ".", "_inputs_waiting", ".", "increment", "(", "shard", ")", "yield", "(", "feed_f", ",", "compute_f", ",", "consume_f", ")", "chunks_fed", "+=", "1", "montblanc", ".", "log", ".", "info", "(", "\"Done feeding {n} chunks.\"", ".", "format", "(", "n", "=", "chunks_fed", ")", ")" ]
Implementation of staging_area feeding
[ "Implementation", "of", "staging_area", "feeding" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L364-L427
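Shard selection in _feed_impl ultimately falls back on a simple round-robin over (device, shard) pairs built with itertools.cycle; the sketch below assumes a _shard index function of the form d * shards_per_device + s, which is not shown in this excerpt:

import itertools

devices = ['/gpu:0', '/gpu:1']
shards_per_device = 2

def shard(d, s):
    return d * shards_per_device + s

which_shard = itertools.cycle([shard(d, s)
                               for s in range(shards_per_device)
                               for d, _ in enumerate(devices)])

print([next(which_shard) for _ in range(6)])  # [0, 2, 1, 3, 0, 2]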
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
RimeSolver._compute
def _compute(self, feed_dict, shard): """ Call the tensorflow compute """ try: descriptor, enq = self._tfrun(self._tf_expr[shard], feed_dict=feed_dict) self._inputs_waiting.decrement(shard) except Exception as e: montblanc.log.exception("Compute Exception") raise
python
def _compute(self, feed_dict, shard): """ Call the tensorflow compute """ try: descriptor, enq = self._tfrun(self._tf_expr[shard], feed_dict=feed_dict) self._inputs_waiting.decrement(shard) except Exception as e: montblanc.log.exception("Compute Exception") raise
[ "def", "_compute", "(", "self", ",", "feed_dict", ",", "shard", ")", ":", "try", ":", "descriptor", ",", "enq", "=", "self", ".", "_tfrun", "(", "self", ".", "_tf_expr", "[", "shard", "]", ",", "feed_dict", "=", "feed_dict", ")", "self", ".", "_inputs_waiting", ".", "decrement", "(", "shard", ")", "except", "Exception", "as", "e", ":", "montblanc", ".", "log", ".", "exception", "(", "\"Compute Exception\"", ")", "raise" ]
Call the tensorflow compute
[ "Call", "the", "tensorflow", "compute" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L517-L526
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
RimeSolver._consume
def _consume(self, data_sinks, cube, global_iter_args): """ Consume stub """ try: return self._consume_impl(data_sinks, cube, global_iter_args) except Exception as e: montblanc.log.exception("Consumer Exception") raise e, None, sys.exc_info()[2]
python
def _consume(self, data_sinks, cube, global_iter_args): """ Consume stub """ try: return self._consume_impl(data_sinks, cube, global_iter_args) except Exception as e: montblanc.log.exception("Consumer Exception") raise e, None, sys.exc_info()[2]
[ "def", "_consume", "(", "self", ",", "data_sinks", ",", "cube", ",", "global_iter_args", ")", ":", "try", ":", "return", "self", ".", "_consume_impl", "(", "data_sinks", ",", "cube", ",", "global_iter_args", ")", "except", "Exception", "as", "e", ":", "montblanc", ".", "log", ".", "exception", "(", "\"Consumer Exception\"", ")", "raise", "e", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]" ]
Consume stub
[ "Consume", "stub" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L529-L535
ska-sa/montblanc
montblanc/impl/rime/tensorflow/RimeSolver.py
RimeSolver._consume_impl
def _consume_impl(self, data_sinks, cube, global_iter_args):
    """ Consume """

    LSA = self._tf_feed_data.local
    output = self._tfrun(LSA.output.get_op)

    # Expect the descriptor in the first tuple position
    assert len(output) > 0
    assert LSA.output.fed_arrays[0] == 'descriptor'

    descriptor = output['descriptor']
    # Make it read-only so we can hash the contents
    descriptor.flags.writeable = False

    dims = self._transcoder.decode(descriptor)
    cube.update_dimensions(dims)

    # Obtain and remove input data from the source cache
    try:
        input_data = self._source_cache.pop(descriptor.data)
    except KeyError:
        raise ValueError("No input data cache available "
                         "in source cache for descriptor {}!"
                         .format(descriptor))

    # For each array in our output, call the associated data sink
    gen = ((n, a) for n, a in output.iteritems()
           if not n == 'descriptor')

    for n, a in gen:
        sink_context = SinkContext(n, cube,
                                   self.config(),
                                   global_iter_args,
                                   cube.array(n) if n in cube.arrays() else {},
                                   a,
                                   input_data)

        _supply_data(data_sinks[n], sink_context)
python
def _consume_impl(self, data_sinks, cube, global_iter_args):
    """ Consume """

    LSA = self._tf_feed_data.local
    output = self._tfrun(LSA.output.get_op)

    # Expect the descriptor in the first tuple position
    assert len(output) > 0
    assert LSA.output.fed_arrays[0] == 'descriptor'

    descriptor = output['descriptor']
    # Make it read-only so we can hash the contents
    descriptor.flags.writeable = False

    dims = self._transcoder.decode(descriptor)
    cube.update_dimensions(dims)

    # Obtain and remove input data from the source cache
    try:
        input_data = self._source_cache.pop(descriptor.data)
    except KeyError:
        raise ValueError("No input data cache available "
                         "in source cache for descriptor {}!"
                         .format(descriptor))

    # For each array in our output, call the associated data sink
    gen = ((n, a) for n, a in output.iteritems()
           if not n == 'descriptor')

    for n, a in gen:
        sink_context = SinkContext(n, cube,
                                   self.config(),
                                   global_iter_args,
                                   cube.array(n) if n in cube.arrays() else {},
                                   a,
                                   input_data)

        _supply_data(data_sinks[n], sink_context)
[ "def", "_consume_impl", "(", "self", ",", "data_sinks", ",", "cube", ",", "global_iter_args", ")", ":", "LSA", "=", "self", ".", "_tf_feed_data", ".", "local", "output", "=", "self", ".", "_tfrun", "(", "LSA", ".", "output", ".", "get_op", ")", "# Expect the descriptor in the first tuple position", "assert", "len", "(", "output", ")", ">", "0", "assert", "LSA", ".", "output", ".", "fed_arrays", "[", "0", "]", "==", "'descriptor'", "descriptor", "=", "output", "[", "'descriptor'", "]", "# Make it read-only so we can hash the contents", "descriptor", ".", "flags", ".", "writeable", "=", "False", "dims", "=", "self", ".", "_transcoder", ".", "decode", "(", "descriptor", ")", "cube", ".", "update_dimensions", "(", "dims", ")", "# Obtain and remove input data from the source cache", "try", ":", "input_data", "=", "self", ".", "_source_cache", ".", "pop", "(", "descriptor", ".", "data", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"No input data cache available \"", "\"in source cache for descriptor {}!\"", ".", "format", "(", "descriptor", ")", ")", "# For each array in our output, call the associated data sink", "gen", "=", "(", "(", "n", ",", "a", ")", "for", "n", ",", "a", "in", "output", ".", "iteritems", "(", ")", "if", "not", "n", "==", "'descriptor'", ")", "for", "n", ",", "a", "in", "gen", ":", "sink_context", "=", "SinkContext", "(", "n", ",", "cube", ",", "self", ".", "config", "(", ")", ",", "global_iter_args", ",", "cube", ".", "array", "(", "n", ")", "if", "n", "in", "cube", ".", "arrays", "(", ")", "else", "{", "}", ",", "a", ",", "input_data", ")", "_supply_data", "(", "data_sinks", "[", "n", "]", ",", "sink_context", ")" ]
Consume
[ "Consume" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/RimeSolver.py#L537-L571
ska-sa/montblanc
montblanc/__init__.py
rime_solver_cfg
def rime_solver_cfg(**kwargs):
    """
    Produces a SolverConfiguration object, inherited from
    a simple python dict, and containing the options required
    to configure the RIME Solver.

    Keyword arguments
    -----------------
    Any keyword arguments are inserted into the returned dict.

    Returns
    -------
    A SolverConfiguration object.
    """
    from configuration import (load_config, config_validator,
                               raise_validator_errors)

    def _merge_copy(d1, d2):
        return { k: _merge_copy(d1[k], d2[k])
                    if k in d1 and isinstance(d1[k], dict)
                              and isinstance(d2[k], dict)
                    else d2[k]
                 for k in d2 }

    try:
        cfg_file = kwargs.pop('cfg_file')
    except KeyError as e:
        slvr_cfg = kwargs
    else:
        cfg = load_config(cfg_file)
        slvr_cfg = _merge_copy(cfg, kwargs)

    # Validate the configuration, raising any errors
    validator = config_validator()
    validator.validate(slvr_cfg)
    raise_validator_errors(validator)

    return validator.document
python
def rime_solver_cfg(**kwargs):
    """
    Produces a SolverConfiguration object, inherited from
    a simple python dict, and containing the options required
    to configure the RIME Solver.

    Keyword arguments
    -----------------
    Any keyword arguments are inserted into the returned dict.

    Returns
    -------
    A SolverConfiguration object.
    """
    from configuration import (load_config, config_validator,
                               raise_validator_errors)

    def _merge_copy(d1, d2):
        return { k: _merge_copy(d1[k], d2[k])
                    if k in d1 and isinstance(d1[k], dict)
                              and isinstance(d2[k], dict)
                    else d2[k]
                 for k in d2 }

    try:
        cfg_file = kwargs.pop('cfg_file')
    except KeyError as e:
        slvr_cfg = kwargs
    else:
        cfg = load_config(cfg_file)
        slvr_cfg = _merge_copy(cfg, kwargs)

    # Validate the configuration, raising any errors
    validator = config_validator()
    validator.validate(slvr_cfg)
    raise_validator_errors(validator)

    return validator.document
[ "def", "rime_solver_cfg", "(", "*", "*", "kwargs", ")", ":", "from", "configuration", "import", "(", "load_config", ",", "config_validator", ",", "raise_validator_errors", ")", "def", "_merge_copy", "(", "d1", ",", "d2", ")", ":", "return", "{", "k", ":", "_merge_copy", "(", "d1", "[", "k", "]", ",", "d2", "[", "k", "]", ")", "if", "k", "in", "d1", "and", "isinstance", "(", "d1", "[", "k", "]", ",", "dict", ")", "and", "isinstance", "(", "d2", "[", "k", "]", ",", "dict", ")", "else", "d2", "[", "k", "]", "for", "k", "in", "d2", "}", "try", ":", "cfg_file", "=", "kwargs", ".", "pop", "(", "'cfg_file'", ")", "except", "KeyError", "as", "e", ":", "slvr_cfg", "=", "kwargs", "else", ":", "cfg", "=", "load_config", "(", "cfg_file", ")", "slvr_cfg", "=", "_merge_copy", "(", "cfg", ",", "kwargs", ")", "# Validate the configuration, raising any errors", "validator", "=", "config_validator", "(", ")", "validator", ".", "validate", "(", "slvr_cfg", ")", "raise_validator_errors", "(", "validator", ")", "return", "validator", ".", "document" ]
Produces a SolverConfiguration object, inherited from
a simple python dict, and containing the options required
to configure the RIME Solver.

Keyword arguments
-----------------
Any keyword arguments are inserted into the returned dict.

Returns
-------
A SolverConfiguration object.
[ "Produces", "a", "SolverConfiguration", "object", "inherited", "from", "a", "simple", "python", "dict", "and", "containing", "the", "options", "required", "to", "configure", "the", "RIME", "Solver", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/__init__.py#L50-L87
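A hedged usage sketch: montblanc/__init__.py exposes this function at package level, so configuration could be built as below. The `auto_correlations` option appears elsewhere in these records, but treat the exact option set as illustrative rather than an exhaustive schema:

import montblanc

# Keyword arguments are merged over any options loaded via cfg_file,
# then validated; invalid options raise ValueError.
slvr_cfg = montblanc.rime_solver_cfg(auto_correlations=False)
print(slvr_cfg['auto_correlations'])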
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py
_create_filenames
def _create_filenames(filename_schema, feed_type):
    """
    Returns a dictionary of beam filename pairs,
    keyed on correlation, from the cartesian product
    of correlations and real, imaginary pairs

    Given 'beam_$(corr)_$(reim).fits' returns:
    {
      'xx' : ('beam_xx_re.fits', 'beam_xx_im.fits'),
      'xy' : ('beam_xy_re.fits', 'beam_xy_im.fits'),
      ...
      'yy' : ('beam_yy_re.fits', 'beam_yy_im.fits'),
    }

    Given 'beam_$(CORR)_$(REIM).fits' returns:
    {
      'xx' : ('beam_XX_RE.fits', 'beam_XX_IM.fits'),
      'xy' : ('beam_XY_RE.fits', 'beam_XY_IM.fits'),
      ...
      'yy' : ('beam_YY_RE.fits', 'beam_YY_IM.fits'),
    }
    """
    template = FitsFilenameTemplate(filename_schema)

    def _re_im_filenames(corr, template):
        try:
            return tuple(template.substitute(
                corr=corr.lower(), CORR=corr.upper(),
                reim=ri.lower(), REIM=ri.upper())
                for ri in REIM)
        except KeyError:
            raise ValueError("Invalid filename schema '%s'. "
                             "FITS Beam filename schemas "
                             "must follow forms such as "
                             "'beam_$(corr)_$(reim).fits' or "
                             "'beam_$(CORR)_$(REIM).fits." % filename_schema)

    if feed_type == 'linear':
        CORRELATIONS = LINEAR_CORRELATIONS
    elif feed_type == 'circular':
        CORRELATIONS = CIRCULAR_CORRELATIONS
    else:
        raise ValueError("Invalid feed_type '{}'. "
                         "Should be 'linear' or 'circular'")

    return collections.OrderedDict(
        (c, _re_im_filenames(c, template))
        for c in CORRELATIONS)
python
def _create_filenames(filename_schema, feed_type):
    """
    Returns a dictionary of beam filename pairs,
    keyed on correlation, from the cartesian product
    of correlations and real, imaginary pairs

    Given 'beam_$(corr)_$(reim).fits' returns:
    {
      'xx' : ('beam_xx_re.fits', 'beam_xx_im.fits'),
      'xy' : ('beam_xy_re.fits', 'beam_xy_im.fits'),
      ...
      'yy' : ('beam_yy_re.fits', 'beam_yy_im.fits'),
    }

    Given 'beam_$(CORR)_$(REIM).fits' returns:
    {
      'xx' : ('beam_XX_RE.fits', 'beam_XX_IM.fits'),
      'xy' : ('beam_XY_RE.fits', 'beam_XY_IM.fits'),
      ...
      'yy' : ('beam_YY_RE.fits', 'beam_YY_IM.fits'),
    }
    """
    template = FitsFilenameTemplate(filename_schema)

    def _re_im_filenames(corr, template):
        try:
            return tuple(template.substitute(
                corr=corr.lower(), CORR=corr.upper(),
                reim=ri.lower(), REIM=ri.upper())
                for ri in REIM)
        except KeyError:
            raise ValueError("Invalid filename schema '%s'. "
                             "FITS Beam filename schemas "
                             "must follow forms such as "
                             "'beam_$(corr)_$(reim).fits' or "
                             "'beam_$(CORR)_$(REIM).fits." % filename_schema)

    if feed_type == 'linear':
        CORRELATIONS = LINEAR_CORRELATIONS
    elif feed_type == 'circular':
        CORRELATIONS = CIRCULAR_CORRELATIONS
    else:
        raise ValueError("Invalid feed_type '{}'. "
                         "Should be 'linear' or 'circular'")

    return collections.OrderedDict(
        (c, _re_im_filenames(c, template))
        for c in CORRELATIONS)
[ "def", "_create_filenames", "(", "filename_schema", ",", "feed_type", ")", ":", "template", "=", "FitsFilenameTemplate", "(", "filename_schema", ")", "def", "_re_im_filenames", "(", "corr", ",", "template", ")", ":", "try", ":", "return", "tuple", "(", "template", ".", "substitute", "(", "corr", "=", "corr", ".", "lower", "(", ")", ",", "CORR", "=", "corr", ".", "upper", "(", ")", ",", "reim", "=", "ri", ".", "lower", "(", ")", ",", "REIM", "=", "ri", ".", "upper", "(", ")", ")", "for", "ri", "in", "REIM", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Invalid filename schema '%s'. \"", "\"FITS Beam filename schemas \"", "\"must follow forms such as \"", "\"'beam_$(corr)_$(reim).fits' or \"", "\"'beam_$(CORR)_$(REIM).fits.\"", "%", "filename_schema", ")", "if", "feed_type", "==", "'linear'", ":", "CORRELATIONS", "=", "LINEAR_CORRELATIONS", "elif", "feed_type", "==", "'circular'", ":", "CORRELATIONS", "=", "CIRCULAR_CORRELATIONS", "else", ":", "raise", "ValueError", "(", "\"Invalid feed_type '{}'. \"", "\"Should be 'linear' or 'circular'\"", ")", "return", "collections", ".", "OrderedDict", "(", "(", "c", ",", "_re_im_filenames", "(", "c", ",", "template", ")", ")", "for", "c", "in", "CORRELATIONS", ")" ]
Returns a dictionary of beam filename pairs,
keyed on correlation, from the cartesian product
of correlations and real, imaginary pairs

Given 'beam_$(corr)_$(reim).fits' returns:
{
  'xx' : ('beam_xx_re.fits', 'beam_xx_im.fits'),
  'xy' : ('beam_xy_re.fits', 'beam_xy_im.fits'),
  ...
  'yy' : ('beam_yy_re.fits', 'beam_yy_im.fits'),
}

Given 'beam_$(CORR)_$(REIM).fits' returns:
{
  'xx' : ('beam_XX_RE.fits', 'beam_XX_IM.fits'),
  'xy' : ('beam_XY_RE.fits', 'beam_XY_IM.fits'),
  ...
  'yy' : ('beam_YY_RE.fits', 'beam_YY_IM.fits'),
}
[ "Returns", "a", "dictionary", "of", "beam", "filename", "pairs", "keyed", "on", "correlation", "from", "the", "cartesian", "product", "of", "correlations", "and", "real", "imaginary", "pairs" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py#L163-L211
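Two details worth noting in the record above: the final ValueError leaves its '{}' placeholder unfilled (there is no .format(feed_type) call in the source), and the FitsFilenameTemplate class is defined elsewhere in the module. A rough stand-in using the stdlib string.Template with standard ${name} delimiters illustrates the substitution the function relies on; the real class presumably accepts the $(corr)-style delimiters shown in the docstring:

import string

# Illustrative only: string.Template uses ${corr} rather than $(corr).
# Unused keyword arguments to substitute() are simply ignored.
t = string.Template('beam_${corr}_${reim}.fits')
print(t.substitute(corr='xx', CORR='XX', reim='re', REIM='RE'))
# -> beam_xx_re.fits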
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py
_open_fits_files
def _open_fits_files(filenames):
    """
    Given a {correlation: filename} mapping for filenames
    returns a {correlation: file handle} mapping
    """
    kw = { 'mode' : 'update', 'memmap' : False }

    def _fh(fn):
        """ Returns a filehandle or None if file does not exist """
        return fits.open(fn, **kw) if os.path.exists(fn) else None

    return collections.OrderedDict(
        (corr, tuple(_fh(fn) for fn in files))
        for corr, files in filenames.iteritems())
python
def _open_fits_files(filenames):
    """
    Given a {correlation: filename} mapping for filenames
    returns a {correlation: file handle} mapping
    """
    kw = { 'mode' : 'update', 'memmap' : False }

    def _fh(fn):
        """ Returns a filehandle or None if file does not exist """
        return fits.open(fn, **kw) if os.path.exists(fn) else None

    return collections.OrderedDict(
        (corr, tuple(_fh(fn) for fn in files))
        for corr, files in filenames.iteritems())
[ "def", "_open_fits_files", "(", "filenames", ")", ":", "kw", "=", "{", "'mode'", ":", "'update'", ",", "'memmap'", ":", "False", "}", "def", "_fh", "(", "fn", ")", ":", "\"\"\" Returns a filehandle or None if file does not exist \"\"\"", "return", "fits", ".", "open", "(", "fn", ",", "*", "*", "kw", ")", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", "else", "None", "return", "collections", ".", "OrderedDict", "(", "(", "corr", ",", "tuple", "(", "_fh", "(", "fn", ")", "for", "fn", "in", "files", ")", ")", "for", "corr", ",", "files", "in", "filenames", ".", "iteritems", "(", ")", ")" ]
Given a {correlation: filename} mapping for filenames returns a {correlation: file handle} mapping
[ "Given", "a", "{", "correlation", ":", "filename", "}", "mapping", "for", "filenames", "returns", "a", "{", "correlation", ":", "file", "handle", "}", "mapping" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py#L213-L226
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py
_create_axes
def _create_axes(filenames, file_dict):
    """ Create a FitsAxes object """

    try:
        # Loop through the file_dictionary, finding the
        # first open FITS file.
        f = iter(f for tup in file_dict.itervalues()
                 for f in tup if f is not None).next()
    except StopIteration as e:
        raise (ValueError("No FITS files were found. "
                          "Searched filenames: '{f}'."
                          .format(f=filenames.values())),
               None, sys.exc_info()[2])

    # Create a FitsAxes object
    axes = FitsAxes(f[0].header)

    # Scale any axes in degrees to radians
    for i, u in enumerate(axes.cunit):
        if u == 'DEG':
            axes.cunit[i] = 'RAD'
            axes.set_axis_scale(i, np.pi/180.0)

    return axes
python
def _create_axes(filenames, file_dict):
    """ Create a FitsAxes object """

    try:
        # Loop through the file_dictionary, finding the
        # first open FITS file.
        f = iter(f for tup in file_dict.itervalues()
                 for f in tup if f is not None).next()
    except StopIteration as e:
        raise (ValueError("No FITS files were found. "
                          "Searched filenames: '{f}'."
                          .format(f=filenames.values())),
               None, sys.exc_info()[2])

    # Create a FitsAxes object
    axes = FitsAxes(f[0].header)

    # Scale any axes in degrees to radians
    for i, u in enumerate(axes.cunit):
        if u == 'DEG':
            axes.cunit[i] = 'RAD'
            axes.set_axis_scale(i, np.pi/180.0)

    return axes
[ "def", "_create_axes", "(", "filenames", ",", "file_dict", ")", ":", "try", ":", "# Loop through the file_dictionary, finding the", "# first open FITS file.", "f", "=", "iter", "(", "f", "for", "tup", "in", "file_dict", ".", "itervalues", "(", ")", "for", "f", "in", "tup", "if", "f", "is", "not", "None", ")", ".", "next", "(", ")", "except", "StopIteration", "as", "e", ":", "raise", "(", "ValueError", "(", "\"No FITS files were found. \"", "\"Searched filenames: '{f}'.\"", ".", "format", "(", "f", "=", "filenames", ".", "values", "(", ")", ")", ")", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "# Create a FitsAxes object", "axes", "=", "FitsAxes", "(", "f", "[", "0", "]", ".", "header", ")", "# Scale any axes in degrees to radians", "for", "i", ",", "u", "in", "enumerate", "(", "axes", ".", "cunit", ")", ":", "if", "u", "==", "'DEG'", ":", "axes", ".", "cunit", "[", "i", "]", "=", "'RAD'", "axes", ".", "set_axis_scale", "(", "i", ",", "np", ".", "pi", "/", "180.0", ")", "return", "axes" ]
Create a FitsAxes object
[ "Create", "a", "FitsAxes", "object" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py#L237-L261
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py
FitsBeamSourceProvider._initialise
def _initialise(self, feed_type="linear"):
    """
    Initialise the object by generating appropriate filenames,
    opening associated file handles and inspecting the FITS axes
    of these files.
    """
    self._filenames = filenames = _create_filenames(self._filename_schema,
                                                    feed_type)
    self._files = files = _open_fits_files(filenames)
    self._axes = axes = _create_axes(filenames, files)
    self._dim_indices = dim_indices = l_ax, m_ax, f_ax = tuple(
        axes.iaxis(d) for d in self._fits_dims)

    # Complain if we can't find required axes
    for i, ax in zip(dim_indices, self._fits_dims):
        if i == -1:
            raise ValueError("'%s' axis not found!" % ax)

    self._cube_extents = _cube_extents(axes, l_ax, m_ax, f_ax,
                                       self._l_sign, self._m_sign)
    self._shape = tuple(axes.naxis[d] for d in dim_indices) + (4,)
    self._beam_freq_map = axes.grid[f_ax]

    # Now describe our dimension sizes
    self._dim_updates = [(n, axes.naxis[i]) for n, i
                         in zip(self._beam_dims, dim_indices)]

    self._initialised = True
python
def _initialise(self, feed_type="linear"):
    """
    Initialise the object by generating appropriate filenames,
    opening associated file handles and inspecting the FITS axes
    of these files.
    """
    self._filenames = filenames = _create_filenames(self._filename_schema,
                                                    feed_type)
    self._files = files = _open_fits_files(filenames)
    self._axes = axes = _create_axes(filenames, files)
    self._dim_indices = dim_indices = l_ax, m_ax, f_ax = tuple(
        axes.iaxis(d) for d in self._fits_dims)

    # Complain if we can't find required axes
    for i, ax in zip(dim_indices, self._fits_dims):
        if i == -1:
            raise ValueError("'%s' axis not found!" % ax)

    self._cube_extents = _cube_extents(axes, l_ax, m_ax, f_ax,
                                       self._l_sign, self._m_sign)
    self._shape = tuple(axes.naxis[d] for d in dim_indices) + (4,)
    self._beam_freq_map = axes.grid[f_ax]

    # Now describe our dimension sizes
    self._dim_updates = [(n, axes.naxis[i]) for n, i
                         in zip(self._beam_dims, dim_indices)]

    self._initialised = True
[ "def", "_initialise", "(", "self", ",", "feed_type", "=", "\"linear\"", ")", ":", "self", ".", "_filenames", "=", "filenames", "=", "_create_filenames", "(", "self", ".", "_filename_schema", ",", "feed_type", ")", "self", ".", "_files", "=", "files", "=", "_open_fits_files", "(", "filenames", ")", "self", ".", "_axes", "=", "axes", "=", "_create_axes", "(", "filenames", ",", "files", ")", "self", ".", "_dim_indices", "=", "dim_indices", "=", "l_ax", ",", "m_ax", ",", "f_ax", "=", "tuple", "(", "axes", ".", "iaxis", "(", "d", ")", "for", "d", "in", "self", ".", "_fits_dims", ")", "# Complain if we can't find required axes", "for", "i", ",", "ax", "in", "zip", "(", "dim_indices", ",", "self", ".", "_fits_dims", ")", ":", "if", "i", "==", "-", "1", ":", "raise", "ValueError", "(", "\"'%s' axis not found!\"", "%", "ax", ")", "self", ".", "_cube_extents", "=", "_cube_extents", "(", "axes", ",", "l_ax", ",", "m_ax", ",", "f_ax", ",", "self", ".", "_l_sign", ",", "self", ".", "_m_sign", ")", "self", ".", "_shape", "=", "tuple", "(", "axes", ".", "naxis", "[", "d", "]", "for", "d", "in", "dim_indices", ")", "+", "(", "4", ",", ")", "self", ".", "_beam_freq_map", "=", "axes", ".", "grid", "[", "f_ax", "]", "# Now describe our dimension sizes", "self", ".", "_dim_updates", "=", "[", "(", "n", ",", "axes", ".", "naxis", "[", "i", "]", ")", "for", "n", ",", "i", "in", "zip", "(", "self", ".", "_beam_dims", ",", "dim_indices", ")", "]", "self", ".", "_initialised", "=", "True" ]
Initialise the object by generating appropriate filenames, opening associated file handles and inspecting the FITS axes of these files.
[ "Initialise", "the", "object", "by", "generating", "appropriate", "filenames", "opening", "associated", "file", "handles", "and", "inspecting", "the", "FITS", "axes", "of", "these", "files", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py#L333-L360
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py
FitsBeamSourceProvider.ebeam
def ebeam(self, context):
    """ ebeam cube data source """
    if context.shape != self.shape:
        raise ValueError("Partial feeding of the "
                         "beam cube is not yet supported %s %s." %
                         (context.shape, self.shape))

    ebeam = np.empty(context.shape, context.dtype)

    # Iterate through the correlations,
    # assigning real and imaginary data, if present,
    # otherwise zeroing the correlation
    for i, (re, im) in enumerate(self._files.itervalues()):
        ebeam[:,:,:,i].real[:] = 0 if re is None else re[0].data.T
        ebeam[:,:,:,i].imag[:] = 0 if im is None else im[0].data.T

    return ebeam
python
def ebeam(self, context):
    """ ebeam cube data source """
    if context.shape != self.shape:
        raise ValueError("Partial feeding of the "
                         "beam cube is not yet supported %s %s." %
                         (context.shape, self.shape))

    ebeam = np.empty(context.shape, context.dtype)

    # Iterate through the correlations,
    # assigning real and imaginary data, if present,
    # otherwise zeroing the correlation
    for i, (re, im) in enumerate(self._files.itervalues()):
        ebeam[:,:,:,i].real[:] = 0 if re is None else re[0].data.T
        ebeam[:,:,:,i].imag[:] = 0 if im is None else im[0].data.T

    return ebeam
[ "def", "ebeam", "(", "self", ",", "context", ")", ":", "if", "context", ".", "shape", "!=", "self", ".", "shape", ":", "raise", "ValueError", "(", "\"Partial feeding of the \"", "\"beam cube is not yet supported %s %s.\"", "%", "(", "context", ".", "shape", ",", "self", ".", "shape", ")", ")", "ebeam", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "# Iterate through the correlations,", "# assigning real and imaginary data, if present,", "# otherwise zeroing the correlation", "for", "i", ",", "(", "re", ",", "im", ")", "in", "enumerate", "(", "self", ".", "_files", ".", "itervalues", "(", ")", ")", ":", "ebeam", "[", ":", ",", ":", ",", ":", ",", "i", "]", ".", "real", "[", ":", "]", "=", "0", "if", "re", "is", "None", "else", "re", "[", "0", "]", ".", "data", ".", "T", "ebeam", "[", ":", ",", ":", ",", ":", ",", "i", "]", ".", "imag", "[", ":", "]", "=", "0", "if", "im", "is", "None", "else", "im", "[", "0", "]", ".", "data", ".", "T", "return", "ebeam" ]
ebeam cube data source
[ "ebeam", "cube", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/fits_beam_source_provider.py#L370-L385
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sinks/ms_sink_provider.py
MSSinkProvider.model_vis
def model_vis(self, context):
    """ model visibility data sink """
    column = self._vis_column
    msshape = None

    # Do we have a column descriptor for the supplied column?
    try:
        coldesc = self._manager.column_descriptors[column]
    except KeyError as e:
        coldesc = None

    # Try to get the shape from the descriptor
    if coldesc is not None:
        try:
            msshape = [-1] + coldesc['shape'].tolist()
        except KeyError as e:
            msshape = None

    # Otherwise guess it and warn
    if msshape is None:
        guessed_shape = [self._manager._nchan, 4]

        montblanc.log.warn("Could not obtain 'shape' from the '{c}' "
                           "column descriptor. Guessing it is '{gs}'.".format(
                               c=column, gs=guessed_shape))

        msshape = [-1] + guessed_shape

    lrow, urow = MS.row_extents(context)

    self._manager.ordered_main_table.putcol(column,
                                            context.data.reshape(msshape),
                                            startrow=lrow, nrow=urow-lrow)
python
def model_vis(self, context):
    """ model visibility data sink """
    column = self._vis_column
    msshape = None

    # Do we have a column descriptor for the supplied column?
    try:
        coldesc = self._manager.column_descriptors[column]
    except KeyError as e:
        coldesc = None

    # Try to get the shape from the descriptor
    if coldesc is not None:
        try:
            msshape = [-1] + coldesc['shape'].tolist()
        except KeyError as e:
            msshape = None

    # Otherwise guess it and warn
    if msshape is None:
        guessed_shape = [self._manager._nchan, 4]

        montblanc.log.warn("Could not obtain 'shape' from the '{c}' "
                           "column descriptor. Guessing it is '{gs}'.".format(
                               c=column, gs=guessed_shape))

        msshape = [-1] + guessed_shape

    lrow, urow = MS.row_extents(context)

    self._manager.ordered_main_table.putcol(column,
                                            context.data.reshape(msshape),
                                            startrow=lrow, nrow=urow-lrow)
[ "def", "model_vis", "(", "self", ",", "context", ")", ":", "column", "=", "self", ".", "_vis_column", "msshape", "=", "None", "# Do we have a column descriptor for the supplied column?", "try", ":", "coldesc", "=", "self", ".", "_manager", ".", "column_descriptors", "[", "column", "]", "except", "KeyError", "as", "e", ":", "coldesc", "=", "None", "# Try to get the shape from the descriptor", "if", "coldesc", "is", "not", "None", ":", "try", ":", "msshape", "=", "[", "-", "1", "]", "+", "coldesc", "[", "'shape'", "]", ".", "tolist", "(", ")", "except", "KeyError", "as", "e", ":", "msshape", "=", "None", "# Otherwise guess it and warn", "if", "msshape", "is", "None", ":", "guessed_shape", "=", "[", "self", ".", "_manager", ".", "_nchan", ",", "4", "]", "montblanc", ".", "log", ".", "warn", "(", "\"Could not obtain 'shape' from the '{c}' \"", "\"column descriptor. Guessing it is '{gs}'.\"", ".", "format", "(", "c", "=", "column", ",", "gs", "=", "guessed_shape", ")", ")", "msshape", "=", "[", "-", "1", "]", "+", "guessed_shape", "lrow", ",", "urow", "=", "MS", ".", "row_extents", "(", "context", ")", "self", ".", "_manager", ".", "ordered_main_table", ".", "putcol", "(", "column", ",", "context", ".", "data", ".", "reshape", "(", "msshape", ")", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")" ]
model visibility data sink
[ "model", "visibility", "data", "sink" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sinks/ms_sink_provider.py#L54-L86
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/cached_source_provider.py
_cache
def _cache(method):
    """
    Decorator for caching data source return values

    Create a key index for the proxied array in the context.
    Iterate over the array shape descriptor e.g. (ntime, nbl, 3)
    returning tuples containing the lower and upper extents
    of string dimensions. Takes (0, d) in the case of an
    integer dimension.
    """

    @functools.wraps(method)
    def memoizer(self, context):
        # Construct the key for the given index
        idx = context.array_extents(context.name)
        key = tuple(i for t in idx for i in t)

        with self._lock:
            # Access the sub-cache for this data source
            array_cache = self._cache[context.name]

            # Cache miss, call the data source
            if key not in array_cache:
                array_cache[key] = method(context)

            return array_cache[key]

    return memoizer
python
def _cache(method):
    """
    Decorator for caching data source return values

    Create a key index for the proxied array in the context.
    Iterate over the array shape descriptor e.g. (ntime, nbl, 3)
    returning tuples containing the lower and upper extents
    of string dimensions. Takes (0, d) in the case of an
    integer dimension.
    """

    @functools.wraps(method)
    def memoizer(self, context):
        # Construct the key for the given index
        idx = context.array_extents(context.name)
        key = tuple(i for t in idx for i in t)

        with self._lock:
            # Access the sub-cache for this data source
            array_cache = self._cache[context.name]

            # Cache miss, call the data source
            if key not in array_cache:
                array_cache[key] = method(context)

            return array_cache[key]

    return memoizer
[ "def", "_cache", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "memoizer", "(", "self", ",", "context", ")", ":", "# Construct the key for the given index", "idx", "=", "context", ".", "array_extents", "(", "context", ".", "name", ")", "key", "=", "tuple", "(", "i", "for", "t", "in", "idx", "for", "i", "in", "t", ")", "with", "self", ".", "_lock", ":", "# Access the sub-cache for this data source", "array_cache", "=", "self", ".", "_cache", "[", "context", ".", "name", "]", "# Cache miss, call the data source", "if", "key", "not", "in", "array_cache", ":", "array_cache", "[", "key", "]", "=", "method", "(", "context", ")", "return", "array_cache", "[", "key", "]", "return", "memoizer" ]
Decorator for caching data source return values

Create a key index for the proxied array in the context.
Iterate over the array shape descriptor e.g. (ntime, nbl, 3)
returning tuples containing the lower and upper extents
of string dimensions. Takes (0, d) in the case of an
integer dimension.
[ "Decorator", "for", "caching", "data", "source", "return", "values" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/cached_source_provider.py#L29-L56
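The cache key is the flattened tuple of per-dimension (lower, upper) extents, so each chunk of each named array is computed once. A self-contained sketch of the same pattern, with hypothetical names, assuming the provider carries a lock and a per-array dict cache:

import functools
import threading
from collections import defaultdict

class DemoProvider(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._cache = defaultdict(dict)

    def _cached(method):
        @functools.wraps(method)
        def memoizer(self, name, extents):
            # Flatten ((lo, hi), ...) extents into a hashable key
            key = tuple(i for t in extents for i in t)
            with self._lock:
                array_cache = self._cache[name]
                if key not in array_cache:
                    array_cache[key] = method(self, name, extents)
                return array_cache[key]
        return memoizer

    @_cached
    def load(self, name, extents):
        print("computing %s %s" % (name, extents))
        return sum(u - l for l, u in extents)

p = DemoProvider()
p.load("uvw", ((0, 10), (0, 3)))   # computed
p.load("uvw", ((0, 10), (0, 3)))   # served from cache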
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/cached_source_provider.py
_proxy
def _proxy(method):
    """ Decorator returning a method that proxies a data source. """
    @functools.wraps(method)
    def memoizer(self, context):
        return method(context)

    return memoizer
python
def _proxy(method):
    """ Decorator returning a method that proxies a data source. """
    @functools.wraps(method)
    def memoizer(self, context):
        return method(context)

    return memoizer
[ "def", "_proxy", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "memoizer", "(", "self", ",", "context", ")", ":", "return", "method", "(", "context", ")", "return", "memoizer" ]
Decorator returning a method that proxies a data source.
[ "Decorator", "returning", "a", "method", "that", "proxies", "a", "data", "source", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/cached_source_provider.py#L58-L66
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/cached_source_provider.py
CachedSourceProvider.start
def start(self, start_context):
    """ Perform any logic on solution start """
    for p in self._providers:
        p.start(start_context)

    if self._clear_start:
        self.clear_cache()
python
def start(self, start_context):
    """ Perform any logic on solution start """
    for p in self._providers:
        p.start(start_context)

    if self._clear_start:
        self.clear_cache()
[ "def", "start", "(", "self", ",", "start_context", ")", ":", "for", "p", "in", "self", ".", "_providers", ":", "p", ".", "start", "(", "start_context", ")", "if", "self", ".", "_clear_start", ":", "self", ".", "clear_cache", "(", ")" ]
Perform any logic on solution start
[ "Perform", "any", "logic", "on", "solution", "start" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/cached_source_provider.py#L132-L138
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/cached_source_provider.py
CachedSourceProvider.stop
def stop(self, stop_context):
    """ Perform any logic on solution stop """
    for p in self._providers:
        p.stop(stop_context)

    if self._clear_stop:
        self.clear_cache()
python
def stop(self, stop_context):
    """ Perform any logic on solution stop """
    for p in self._providers:
        p.stop(stop_context)

    if self._clear_stop:
        self.clear_cache()
[ "def", "stop", "(", "self", ",", "stop_context", ")", ":", "for", "p", "in", "self", ".", "_providers", ":", "p", ".", "stop", "(", "stop_context", ")", "if", "self", ".", "_clear_stop", ":", "self", ".", "clear_cache", "(", ")" ]
Perform any logic on solution stop
[ "Perform", "any", "logic", "on", "solution", "stop" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/cached_source_provider.py#L140-L146
ska-sa/montblanc
montblanc/impl/rime/tensorflow/config.py
default_base_ant_pairs
def default_base_ant_pairs(self, context):
    """ Compute base antenna pairs """
    k = 0 if context.cfg['auto_correlations'] == True else 1
    na = context.dim_global_size('na')
    gen = (i.astype(context.dtype) for i in np.triu_indices(na, k))

    # Cache np.triu_indices(na, k) as its likely that (na, k) will
    # stay constant much of the time. Assumption here is that this
    # method will be grafted onto a DefaultsSourceProvider with
    # the appropriate members.
    if self._is_cached:
        array_cache = self._chunk_cache['default_base_ant_pairs']
        key = (k, na)

        # Cache miss
        if key not in array_cache:
            array_cache[key] = tuple(gen)

        return array_cache[key]

    return tuple(gen)
python
def default_base_ant_pairs(self, context):
    """ Compute base antenna pairs """
    k = 0 if context.cfg['auto_correlations'] == True else 1
    na = context.dim_global_size('na')
    gen = (i.astype(context.dtype) for i in np.triu_indices(na, k))

    # Cache np.triu_indices(na, k) as its likely that (na, k) will
    # stay constant much of the time. Assumption here is that this
    # method will be grafted onto a DefaultsSourceProvider with
    # the appropriate members.
    if self._is_cached:
        array_cache = self._chunk_cache['default_base_ant_pairs']
        key = (k, na)

        # Cache miss
        if key not in array_cache:
            array_cache[key] = tuple(gen)

        return array_cache[key]

    return tuple(gen)
[ "def", "default_base_ant_pairs", "(", "self", ",", "context", ")", ":", "k", "=", "0", "if", "context", ".", "cfg", "[", "'auto_correlations'", "]", "==", "True", "else", "1", "na", "=", "context", ".", "dim_global_size", "(", "'na'", ")", "gen", "=", "(", "i", ".", "astype", "(", "context", ".", "dtype", ")", "for", "i", "in", "np", ".", "triu_indices", "(", "na", ",", "k", ")", ")", "# Cache np.triu_indices(na, k) as its likely that (na, k) will", "# stay constant much of the time. Assumption here is that this", "# method will be grafted onto a DefaultsSourceProvider with", "# the appropriate members.", "if", "self", ".", "_is_cached", ":", "array_cache", "=", "self", ".", "_chunk_cache", "[", "'default_base_ant_pairs'", "]", "key", "=", "(", "k", ",", "na", ")", "# Cache miss", "if", "key", "not", "in", "array_cache", ":", "array_cache", "[", "key", "]", "=", "tuple", "(", "gen", ")", "return", "array_cache", "[", "key", "]", "return", "tuple", "(", "gen", ")" ]
Compute base antenna pairs
[ "Compute", "base", "antenna", "pairs" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/config.py#L67-L87
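np.triu_indices(na, k) returns the row and column indices of the upper triangle of an na x na matrix; k=1 skips the diagonal, i.e. excludes auto-correlations. A small worked example:

import numpy as np

na = 4                        # four antennas
ant1, ant2 = np.triu_indices(na, 1)
print(ant1)                   # [0 0 0 1 1 2]
print(ant2)                   # [1 2 3 2 3 3]
# 6 baselines == na * (na - 1) // 2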
ska-sa/montblanc
montblanc/impl/rime/tensorflow/config.py
default_antenna1
def default_antenna1(self, context):
    """ Default antenna1 values """
    ant1, ant2 = default_base_ant_pairs(self, context)
    (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl')
    ant1_result = np.empty(context.shape, context.dtype)
    ant1_result[:,:] = ant1[np.newaxis,bl:bu]

    return ant1_result
python
def default_antenna1(self, context):
    """ Default antenna1 values """
    ant1, ant2 = default_base_ant_pairs(self, context)
    (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl')
    ant1_result = np.empty(context.shape, context.dtype)
    ant1_result[:,:] = ant1[np.newaxis,bl:bu]

    return ant1_result
[ "def", "default_antenna1", "(", "self", ",", "context", ")", ":", "ant1", ",", "ant2", "=", "default_base_ant_pairs", "(", "self", ",", "context", ")", "(", "tl", ",", "tu", ")", ",", "(", "bl", ",", "bu", ")", "=", "context", ".", "dim_extents", "(", "'ntime'", ",", "'nbl'", ")", "ant1_result", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "ant1_result", "[", ":", ",", ":", "]", "=", "ant1", "[", "np", ".", "newaxis", ",", "bl", ":", "bu", "]", "return", "ant1_result" ]
Default antenna1 values
[ "Default", "antenna1", "values" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/config.py#L89-L95
ska-sa/montblanc
montblanc/impl/rime/tensorflow/config.py
default_antenna2
def default_antenna2(self, context):
    """ Default antenna2 values """
    ant1, ant2 = default_base_ant_pairs(self, context)
    (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl')
    ant2_result = np.empty(context.shape, context.dtype)
    ant2_result[:,:] = ant2[np.newaxis,bl:bu]

    return ant2_result
python
def default_antenna2(self, context):
    """ Default antenna2 values """
    ant1, ant2 = default_base_ant_pairs(self, context)
    (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl')
    ant2_result = np.empty(context.shape, context.dtype)
    ant2_result[:,:] = ant2[np.newaxis,bl:bu]

    return ant2_result
[ "def", "default_antenna2", "(", "self", ",", "context", ")", ":", "ant1", ",", "ant2", "=", "default_base_ant_pairs", "(", "self", ",", "context", ")", "(", "tl", ",", "tu", ")", ",", "(", "bl", ",", "bu", ")", "=", "context", ".", "dim_extents", "(", "'ntime'", ",", "'nbl'", ")", "ant2_result", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "ant2_result", "[", ":", ",", ":", "]", "=", "ant2", "[", "np", ".", "newaxis", ",", "bl", ":", "bu", "]", "return", "ant2_result" ]
Default antenna2 values
[ "Default", "antenna2", "values" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/config.py#L97-L103
ska-sa/montblanc
montblanc/impl/rime/tensorflow/config.py
identity_on_pols
def identity_on_pols(self, context):
    """
    Returns [[1, 0],
             [0, 1]] tiled up to other dimensions
    """
    A = np.empty(context.shape, context.dtype)
    A[:,:,:] = [[[1,0,0,1]]]

    return A
python
def identity_on_pols(self, context):
    """
    Returns [[1, 0],
             [0, 1]] tiled up to other dimensions
    """
    A = np.empty(context.shape, context.dtype)
    A[:,:,:] = [[[1,0,0,1]]]

    return A
[ "def", "identity_on_pols", "(", "self", ",", "context", ")", ":", "A", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "A", "[", ":", ",", ":", ",", ":", "]", "=", "[", "[", "[", "1", ",", "0", ",", "0", ",", "1", "]", "]", "]", "return", "A" ]
Returns [[1, 0],
         [0, 1]] tiled up to other dimensions
[ "Returns", "[[", "1", "0", "]", "tiled", "up", "to", "other", "dimensions", "[", "0", "1", "]]" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/config.py#L134-L141
ska-sa/montblanc
montblanc/impl/rime/tensorflow/config.py
default_stokes
def default_stokes(self, context):
    """
    Returns [[1, 0],
             [0, 0]] tiled up to other dimensions
    """
    A = np.empty(context.shape, context.dtype)
    A[:,:,:] = [[[1,0,0,0]]]

    return A
python
def default_stokes(self, context):
    """
    Returns [[1, 0],
             [0, 0]] tiled up to other dimensions
    """
    A = np.empty(context.shape, context.dtype)
    A[:,:,:] = [[[1,0,0,0]]]

    return A
[ "def", "default_stokes", "(", "self", ",", "context", ")", ":", "A", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "A", "[", ":", ",", ":", ",", ":", "]", "=", "[", "[", "[", "1", ",", "0", ",", "0", ",", "0", "]", "]", "]", "return", "A" ]
Returns [[1, 0],
         [0, 0]] tiled up to other dimensions
[ "Returns", "[[", "1", "0", "]", "tiled", "up", "to", "other", "dimensions", "[", "0", "0", "]]" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/config.py#L143-L150
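Both helpers above lean on NumPy broadcasting: a nested (1, 1, 4) list is stretched across the leading dimensions of the output array. For example:

import numpy as np

A = np.empty((2, 3, 4), np.float64)   # e.g. (ntime, na, npol)
A[:, :, :] = [[[1, 0, 0, 1]]]         # broadcast across leading dims
print(A[1, 2])                        # [1. 0. 0. 1.]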
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.frequency
def frequency(self, context):
    """ Frequency data source """
    channels = self._manager.spectral_window_table.getcol(MS.CHAN_FREQ)
    return channels.reshape(context.shape).astype(context.dtype)
python
def frequency(self, context):
    """ Frequency data source """
    channels = self._manager.spectral_window_table.getcol(MS.CHAN_FREQ)
    return channels.reshape(context.shape).astype(context.dtype)
[ "def", "frequency", "(", "self", ",", "context", ")", ":", "channels", "=", "self", ".", "_manager", ".", "spectral_window_table", ".", "getcol", "(", "MS", ".", "CHAN_FREQ", ")", "return", "channels", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
Frequency data source
[ "Frequency", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L93-L96
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.ref_frequency
def ref_frequency(self, context):
    """ Reference frequency data source """
    num_chans = self._manager.spectral_window_table.getcol(MS.NUM_CHAN)
    ref_freqs = self._manager.spectral_window_table.getcol(MS.REF_FREQUENCY)

    data = np.hstack((np.repeat(rf, bs) for bs, rf
                      in zip(num_chans, ref_freqs)))
    return data.reshape(context.shape).astype(context.dtype)
python
def ref_frequency(self, context):
    """ Reference frequency data source """
    num_chans = self._manager.spectral_window_table.getcol(MS.NUM_CHAN)
    ref_freqs = self._manager.spectral_window_table.getcol(MS.REF_FREQUENCY)

    data = np.hstack((np.repeat(rf, bs) for bs, rf
                      in zip(num_chans, ref_freqs)))
    return data.reshape(context.shape).astype(context.dtype)
[ "def", "ref_frequency", "(", "self", ",", "context", ")", ":", "num_chans", "=", "self", ".", "_manager", ".", "spectral_window_table", ".", "getcol", "(", "MS", ".", "NUM_CHAN", ")", "ref_freqs", "=", "self", ".", "_manager", ".", "spectral_window_table", ".", "getcol", "(", "MS", ".", "REF_FREQUENCY", ")", "data", "=", "np", ".", "hstack", "(", "(", "np", ".", "repeat", "(", "rf", ",", "bs", ")", "for", "bs", ",", "rf", "in", "zip", "(", "num_chans", ",", "ref_freqs", ")", ")", ")", "return", "data", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
Reference frequency data source
[ "Reference", "frequency", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L98-L104
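The hstack/repeat combination expands one reference frequency per spectral window into a per-channel vector. A worked example with assumed band sizes (a list is passed to np.hstack here, since newer NumPy versions deprecate generator arguments):

import numpy as np

num_chans = [2, 3]                 # channels per spectral window
ref_freqs = [1.4e9, 1.6e9]         # one reference frequency per window

data = np.hstack([np.repeat(rf, bs)
                  for bs, rf in zip(num_chans, ref_freqs)])
print(data)   # [1.4e9 1.4e9 1.6e9 1.6e9 1.6e9]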
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.uvw
def uvw(self, context):
    """ Per-antenna UVW coordinate data source """
    # Hacky access of private member
    cube = context._cube

    # Create antenna1 source context
    a1_actual = cube.array("antenna1", reify=True)
    a1_ctx = SourceContext("antenna1", cube, context.cfg,
                           context.iter_args, cube.array("antenna1"),
                           a1_actual.shape, a1_actual.dtype)
    # Create antenna2 source context
    a2_actual = cube.array("antenna2", reify=True)
    a2_ctx = SourceContext("antenna2", cube, context.cfg,
                           context.iter_args, cube.array("antenna2"),
                           a2_actual.shape, a2_actual.dtype)

    # Get antenna1 and antenna2 data
    ant1 = self.antenna1(a1_ctx).ravel()
    ant2 = self.antenna2(a2_ctx).ravel()

    # Obtain per baseline UVW data
    lrow, urow = MS.uvw_row_extents(context)
    uvw = self._manager.ordered_uvw_table.getcol(MS.UVW,
                                                 startrow=lrow,
                                                 nrow=urow-lrow)

    # Perform the per-antenna UVW decomposition
    ntime, nbl = context.dim_extent_size('ntime', 'nbl')
    na = context.dim_global_size('na')
    chunks = np.repeat(nbl, ntime).astype(ant1.dtype)
    auvw = mbu.antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=na)

    return auvw.reshape(context.shape).astype(context.dtype)
python
def uvw(self, context):
    """ Per-antenna UVW coordinate data source """
    # Hacky access of private member
    cube = context._cube

    # Create antenna1 source context
    a1_actual = cube.array("antenna1", reify=True)
    a1_ctx = SourceContext("antenna1", cube, context.cfg,
                           context.iter_args, cube.array("antenna1"),
                           a1_actual.shape, a1_actual.dtype)
    # Create antenna2 source context
    a2_actual = cube.array("antenna2", reify=True)
    a2_ctx = SourceContext("antenna2", cube, context.cfg,
                           context.iter_args, cube.array("antenna2"),
                           a2_actual.shape, a2_actual.dtype)

    # Get antenna1 and antenna2 data
    ant1 = self.antenna1(a1_ctx).ravel()
    ant2 = self.antenna2(a2_ctx).ravel()

    # Obtain per baseline UVW data
    lrow, urow = MS.uvw_row_extents(context)
    uvw = self._manager.ordered_uvw_table.getcol(MS.UVW,
                                                 startrow=lrow,
                                                 nrow=urow-lrow)

    # Perform the per-antenna UVW decomposition
    ntime, nbl = context.dim_extent_size('ntime', 'nbl')
    na = context.dim_global_size('na')
    chunks = np.repeat(nbl, ntime).astype(ant1.dtype)
    auvw = mbu.antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=na)

    return auvw.reshape(context.shape).astype(context.dtype)
[ "def", "uvw", "(", "self", ",", "context", ")", ":", "# Hacky access of private member", "cube", "=", "context", ".", "_cube", "# Create antenna1 source context", "a1_actual", "=", "cube", ".", "array", "(", "\"antenna1\"", ",", "reify", "=", "True", ")", "a1_ctx", "=", "SourceContext", "(", "\"antenna1\"", ",", "cube", ",", "context", ".", "cfg", ",", "context", ".", "iter_args", ",", "cube", ".", "array", "(", "\"antenna1\"", ")", ",", "a1_actual", ".", "shape", ",", "a1_actual", ".", "dtype", ")", "# Create antenna2 source context", "a2_actual", "=", "cube", ".", "array", "(", "\"antenna2\"", ",", "reify", "=", "True", ")", "a2_ctx", "=", "SourceContext", "(", "\"antenna2\"", ",", "cube", ",", "context", ".", "cfg", ",", "context", ".", "iter_args", ",", "cube", ".", "array", "(", "\"antenna2\"", ")", ",", "a2_actual", ".", "shape", ",", "a2_actual", ".", "dtype", ")", "# Get antenna1 and antenna2 data", "ant1", "=", "self", ".", "antenna1", "(", "a1_ctx", ")", ".", "ravel", "(", ")", "ant2", "=", "self", ".", "antenna2", "(", "a2_ctx", ")", ".", "ravel", "(", ")", "# Obtain per baseline UVW data", "lrow", ",", "urow", "=", "MS", ".", "uvw_row_extents", "(", "context", ")", "uvw", "=", "self", ".", "_manager", ".", "ordered_uvw_table", ".", "getcol", "(", "MS", ".", "UVW", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")", "# Perform the per-antenna UVW decomposition", "ntime", ",", "nbl", "=", "context", ".", "dim_extent_size", "(", "'ntime'", ",", "'nbl'", ")", "na", "=", "context", ".", "dim_global_size", "(", "'na'", ")", "chunks", "=", "np", ".", "repeat", "(", "nbl", ",", "ntime", ")", ".", "astype", "(", "ant1", ".", "dtype", ")", "auvw", "=", "mbu", ".", "antenna_uvw", "(", "uvw", ",", "ant1", ",", "ant2", ",", "chunks", ",", "nr_of_antenna", "=", "na", ")", "return", "auvw", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
Per-antenna UVW coordinate data source
[ "Per", "-", "antenna", "UVW", "coordinate", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L106-L141
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.antenna1
def antenna1(self, context):
    """ antenna1 data source """
    lrow, urow = MS.uvw_row_extents(context)
    antenna1 = self._manager.ordered_uvw_table.getcol(
        MS.ANTENNA1, startrow=lrow, nrow=urow-lrow)

    return antenna1.reshape(context.shape).astype(context.dtype)
python
def antenna1(self, context):
    """ antenna1 data source """
    lrow, urow = MS.uvw_row_extents(context)
    antenna1 = self._manager.ordered_uvw_table.getcol(
        MS.ANTENNA1, startrow=lrow, nrow=urow-lrow)

    return antenna1.reshape(context.shape).astype(context.dtype)
[ "def", "antenna1", "(", "self", ",", "context", ")", ":", "lrow", ",", "urow", "=", "MS", ".", "uvw_row_extents", "(", "context", ")", "antenna1", "=", "self", ".", "_manager", ".", "ordered_uvw_table", ".", "getcol", "(", "MS", ".", "ANTENNA1", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")", "return", "antenna1", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
antenna1 data source
[ "antenna1", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L143-L149
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.antenna2
def antenna2(self, context):
    """ antenna2 data source """
    lrow, urow = MS.uvw_row_extents(context)
    antenna2 = self._manager.ordered_uvw_table.getcol(
        MS.ANTENNA2, startrow=lrow, nrow=urow-lrow)

    return antenna2.reshape(context.shape).astype(context.dtype)
python
def antenna2(self, context):
    """ antenna2 data source """
    lrow, urow = MS.uvw_row_extents(context)
    antenna2 = self._manager.ordered_uvw_table.getcol(
        MS.ANTENNA2, startrow=lrow, nrow=urow-lrow)

    return antenna2.reshape(context.shape).astype(context.dtype)
[ "def", "antenna2", "(", "self", ",", "context", ")", ":", "lrow", ",", "urow", "=", "MS", ".", "uvw_row_extents", "(", "context", ")", "antenna2", "=", "self", ".", "_manager", ".", "ordered_uvw_table", ".", "getcol", "(", "MS", ".", "ANTENNA2", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")", "return", "antenna2", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
antenna2 data source
[ "antenna2", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L151-L157
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.parallactic_angles
def parallactic_angles(self, context):
    """ parallactic angle data source """
    # Time and antenna extents
    (lt, ut), (la, ua) = context.dim_extents('ntime', 'na')

    return (mbu.parallactic_angles(self._times[lt:ut],
                                   self._antenna_positions[la:ua],
                                   self._phase_dir)
            .reshape(context.shape)
            .astype(context.dtype))
python
def parallactic_angles(self, context):
    """ parallactic angle data source """
    # Time and antenna extents
    (lt, ut), (la, ua) = context.dim_extents('ntime', 'na')

    return (mbu.parallactic_angles(self._times[lt:ut],
                                   self._antenna_positions[la:ua],
                                   self._phase_dir)
            .reshape(context.shape)
            .astype(context.dtype))
[ "def", "parallactic_angles", "(", "self", ",", "context", ")", ":", "# Time and antenna extents", "(", "lt", ",", "ut", ")", ",", "(", "la", ",", "ua", ")", "=", "context", ".", "dim_extents", "(", "'ntime'", ",", "'na'", ")", "return", "(", "mbu", ".", "parallactic_angles", "(", "self", ".", "_times", "[", "lt", ":", "ut", "]", ",", "self", ".", "_antenna_positions", "[", "la", ":", "ua", "]", ",", "self", ".", "_phase_dir", ")", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")", ")" ]
parallactic angle data source
[ "parallactic", "angle", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L159-L167
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.observed_vis
def observed_vis(self, context):
    """ Observed visibility data source """
    lrow, urow = MS.row_extents(context)

    data = self._manager.ordered_main_table.getcol(
        self._vis_column, startrow=lrow, nrow=urow-lrow)

    return data.reshape(context.shape).astype(context.dtype)
python
def observed_vis(self, context):
    """ Observed visibility data source """
    lrow, urow = MS.row_extents(context)

    data = self._manager.ordered_main_table.getcol(
        self._vis_column, startrow=lrow, nrow=urow-lrow)

    return data.reshape(context.shape).astype(context.dtype)
[ "def", "observed_vis", "(", "self", ",", "context", ")", ":", "lrow", ",", "urow", "=", "MS", ".", "row_extents", "(", "context", ")", "data", "=", "self", ".", "_manager", ".", "ordered_main_table", ".", "getcol", "(", "self", ".", "_vis_column", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")", "return", "data", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
Observed visibility data source
[ "Observed", "visibility", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L170-L177
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.flag
def flag(self, context):
    """ Flag data source """
    lrow, urow = MS.row_extents(context)

    flag = self._manager.ordered_main_table.getcol(
        MS.FLAG, startrow=lrow, nrow=urow-lrow)

    return flag.reshape(context.shape).astype(context.dtype)
python
def flag(self, context):
    """ Flag data source """
    lrow, urow = MS.row_extents(context)

    flag = self._manager.ordered_main_table.getcol(
        MS.FLAG, startrow=lrow, nrow=urow-lrow)

    return flag.reshape(context.shape).astype(context.dtype)
[ "def", "flag", "(", "self", ",", "context", ")", ":", "lrow", ",", "urow", "=", "MS", ".", "row_extents", "(", "context", ")", "flag", "=", "self", ".", "_manager", ".", "ordered_main_table", ".", "getcol", "(", "MS", ".", "FLAG", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")", "return", "flag", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
Flag data source
[ "Flag", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L179-L186
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
MSSourceProvider.weight
def weight(self, context):
    """ Weight data source """
    lrow, urow = MS.row_extents(context)

    weight = self._manager.ordered_main_table.getcol(
        MS.WEIGHT, startrow=lrow, nrow=urow-lrow)

    # WEIGHT is applied across all channels
    weight = np.repeat(weight, self._manager.channels_per_band, 0)
    return weight.reshape(context.shape).astype(context.dtype)
python
def weight(self, context):
    """ Weight data source """
    lrow, urow = MS.row_extents(context)

    weight = self._manager.ordered_main_table.getcol(
        MS.WEIGHT, startrow=lrow, nrow=urow-lrow)

    # WEIGHT is applied across all channels
    weight = np.repeat(weight, self._manager.channels_per_band, 0)
    return weight.reshape(context.shape).astype(context.dtype)
[ "def", "weight", "(", "self", ",", "context", ")", ":", "lrow", ",", "urow", "=", "MS", ".", "row_extents", "(", "context", ")", "weight", "=", "self", ".", "_manager", ".", "ordered_main_table", ".", "getcol", "(", "MS", ".", "WEIGHT", ",", "startrow", "=", "lrow", ",", "nrow", "=", "urow", "-", "lrow", ")", "# WEIGHT is applied across all channels", "weight", "=", "np", ".", "repeat", "(", "weight", ",", "self", ".", "_manager", ".", "channels_per_band", ",", "0", ")", "return", "weight", ".", "reshape", "(", "context", ".", "shape", ")", ".", "astype", "(", "context", ".", "dtype", ")" ]
Weight data source
[ "Weight", "data", "source" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L188-L197
ska-sa/montblanc
montblanc/impl/rime/tensorflow/__init__.py
load_tf_lib
def load_tf_lib():
    """ Load the tensorflow library """
    from os.path import join as pjoin
    import pkg_resources
    import tensorflow as tf

    path = pjoin('ext', 'rime.so')
    rime_lib_path = pkg_resources.resource_filename("montblanc", path)
    return tf.load_op_library(rime_lib_path)
python
def load_tf_lib():
    """ Load the tensorflow library """
    from os.path import join as pjoin
    import pkg_resources
    import tensorflow as tf

    path = pjoin('ext', 'rime.so')
    rime_lib_path = pkg_resources.resource_filename("montblanc", path)
    return tf.load_op_library(rime_lib_path)
[ "def", "load_tf_lib", "(", ")", ":", "from", "os", ".", "path", "import", "join", "as", "pjoin", "import", "pkg_resources", "import", "tensorflow", "as", "tf", "path", "=", "pjoin", "(", "'ext'", ",", "'rime.so'", ")", "rime_lib_path", "=", "pkg_resources", ".", "resource_filename", "(", "\"montblanc\"", ",", "path", ")", "return", "tf", ".", "load_op_library", "(", "rime_lib_path", ")" ]
Load the tensorflow library
[ "Load", "the", "tensorflow", "library" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/__init__.py#L21-L30
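A hedged usage sketch, assuming the installed montblanc package ships the compiled ext/rime.so; the record's path (montblanc/impl/rime/tensorflow/__init__.py) grounds the import, but the op names the library registers are not shown here:

from montblanc.impl.rime.tensorflow import load_tf_lib

rime = load_tf_lib()
# Custom TensorFlow ops are then available as attributes of 'rime';
# the specific op names depend on what rime.so registers.
print(type(rime))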
ska-sa/montblanc
montblanc/configuration.py
raise_validator_errors
def raise_validator_errors(validator):
    """
    Raise any errors associated with the validator.

    Parameters
    ----------
    validator : :class:`cerberus.Validator`
        Validator

    Raises
    ------
    ValueError
        Raised if errors existed on `validator`.
        Message describing each error and information
        associated with the configuration option
        causing the error.
    """

    if len(validator._errors) == 0:
        return

    def _path_str(path, name=None):
        """ String of the document/schema path. `cfg["foo"]["bar"]` """
        L = [name] if name is not None else []
        L.extend('["%s"]' % p for p in path)
        return "".join(L)

    def _path_leaf(path, dicts):
        """ Dictionary Leaf of the schema/document given the path """
        for p in path:
            dicts = dicts[p]
        return dicts

    wrap = partial(textwrap.wrap, initial_indent=' '*4,
                   subsequent_indent=' '*8)

    msg = ["There were configuration errors:"]

    for e in validator._errors:
        schema_leaf = _path_leaf(e.document_path, validator.schema)
        doc_str = _path_str(e.document_path, "cfg")

        msg.append("Invalid configuration option %s == '%s'." %
                   (doc_str, e.value))

        try:
            otype = schema_leaf["type"]
            msg.extend(wrap("Type must be '%s'." % otype))
        except KeyError:
            pass

        try:
            allowed = schema_leaf["allowed"]
            msg.extend(wrap("Allowed values are '%s'." % allowed))
        except KeyError:
            pass

        try:
            description = schema_leaf["__description__"]
            msg.extend(wrap("Description: %s" % description))
        except KeyError:
            pass

    raise ValueError("\n".join(msg))
python
def raise_validator_errors(validator):
    """
    Raise any errors associated with the validator.

    Parameters
    ----------
    validator : :class:`cerberus.Validator`
        Validator

    Raises
    ------
    ValueError
        Raised if errors existed on `validator`.
        Message describing each error and information
        associated with the configuration option
        causing the error.
    """

    if len(validator._errors) == 0:
        return

    def _path_str(path, name=None):
        """ String of the document/schema path. `cfg["foo"]["bar"]` """
        L = [name] if name is not None else []
        L.extend('["%s"]' % p for p in path)
        return "".join(L)

    def _path_leaf(path, dicts):
        """ Dictionary Leaf of the schema/document given the path """
        for p in path:
            dicts = dicts[p]
        return dicts

    wrap = partial(textwrap.wrap, initial_indent=' '*4,
                   subsequent_indent=' '*8)

    msg = ["There were configuration errors:"]

    for e in validator._errors:
        schema_leaf = _path_leaf(e.document_path, validator.schema)
        doc_str = _path_str(e.document_path, "cfg")

        msg.append("Invalid configuration option %s == '%s'." %
                   (doc_str, e.value))

        try:
            otype = schema_leaf["type"]
            msg.extend(wrap("Type must be '%s'." % otype))
        except KeyError:
            pass

        try:
            allowed = schema_leaf["allowed"]
            msg.extend(wrap("Allowed values are '%s'." % allowed))
        except KeyError:
            pass

        try:
            description = schema_leaf["__description__"]
            msg.extend(wrap("Description: %s" % description))
        except KeyError:
            pass

    raise ValueError("\n".join(msg))
[ "def", "raise_validator_errors", "(", "validator", ")", ":", "if", "len", "(", "validator", ".", "_errors", ")", "==", "0", ":", "return", "def", "_path_str", "(", "path", ",", "name", "=", "None", ")", ":", "\"\"\" String of the document/schema path. `cfg[\"foo\"][\"bar\"]` \"\"\"", "L", "=", "[", "name", "]", "if", "name", "is", "not", "None", "else", "[", "]", "L", ".", "extend", "(", "'[\"%s\"]'", "%", "p", "for", "p", "in", "path", ")", "return", "\"\"", ".", "join", "(", "L", ")", "def", "_path_leaf", "(", "path", ",", "dicts", ")", ":", "\"\"\" Dictionary Leaf of the schema/document given the path \"\"\"", "for", "p", "in", "path", ":", "dicts", "=", "dicts", "[", "p", "]", "return", "dicts", "wrap", "=", "partial", "(", "textwrap", ".", "wrap", ",", "initial_indent", "=", "' '", "*", "4", ",", "subsequent_indent", "=", "' '", "*", "8", ")", "msg", "=", "[", "\"There were configuration errors:\"", "]", "for", "e", "in", "validator", ".", "_errors", ":", "schema_leaf", "=", "_path_leaf", "(", "e", ".", "document_path", ",", "validator", ".", "schema", ")", "doc_str", "=", "_path_str", "(", "e", ".", "document_path", ",", "\"cfg\"", ")", "msg", ".", "append", "(", "\"Invalid configuration option %s == '%s'.\"", "%", "(", "doc_str", ",", "e", ".", "value", ")", ")", "try", ":", "otype", "=", "schema_leaf", "[", "\"type\"", "]", "msg", ".", "extend", "(", "wrap", "(", "\"Type must be '%s'.\"", "%", "otype", ")", ")", "except", "KeyError", ":", "pass", "try", ":", "allowed", "=", "schema_leaf", "[", "\"allowed\"", "]", "msg", ".", "extend", "(", "wrap", "(", "\"Allowed values are '%s'.\"", "%", "allowed", ")", ")", "except", "KeyError", ":", "pass", "try", ":", "description", "=", "schema_leaf", "[", "\"__description__\"", "]", "msg", ".", "extend", "(", "wrap", "(", "\"Description: %s\"", "%", "description", ")", ")", "except", "KeyError", ":", "pass", "raise", "ValueError", "(", "\"\\n\"", ".", "join", "(", "msg", ")", ")" ]
Raise any errors associated with the validator. Parameters ---------- validator : :class:`cerberus.Validator` Validator Raises ------ ValueError Raised if errors existed on `validator`. Message describing each error and information associated with the configuration option causing the error.
[ "Raise", "any", "errors", "associated", "with", "the", "validator", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/configuration.py#L10-L73
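A hedged sketch of how the record above might be driven, assuming the cerberus package is installed; the schema, its "__description__" key and the option values here are illustrative:

    import cerberus

    schema = {'dtype': {'type': 'string',
                        'allowed': ['float', 'double'],
                        '__description__': 'Floating point precision'}}
    validator = cerberus.Validator(schema)

    if not validator.validate({'dtype': 'half'}):
        # Raises ValueError describing the invalid 'dtype' option
        raise_validator_errors(validator)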
tjguk/winshell
winshell.py
indented
def indented(text, level, indent=2): """Take a multiline text and indent it as a block""" return "\n".join("%s%s" % (level * indent * " ", s) for s in text.splitlines())
python
def indented(text, level, indent=2): """Take a multiline text and indent it as a block""" return "\n".join("%s%s" % (level * indent * " ", s) for s in text.splitlines())
[ "def", "indented", "(", "text", ",", "level", ",", "indent", "=", "2", ")", ":", "return", "\"\\n\"", ".", "join", "(", "\"%s%s\"", "%", "(", "level", "*", "indent", "*", "\" \"", ",", "s", ")", "for", "s", "in", "text", ".", "splitlines", "(", ")", ")" ]
Take a multiline text and indent it as a block
[ "Take", "a", "multiline", "text", "and", "indent", "it", "as", "a", "block" ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L83-L85
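A doctest-style sketch for the record above; with level=1 and the default indent of 2, each line gains a two-space prefix:

    >>> print(indented("alpha\nbeta", level=1))
      alpha
      beta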
tjguk/winshell
winshell.py
dumped
def dumped(text, level, indent=2): """Put curly brackets round an indented text""" return indented("{\n%s\n}" % indented(text, level + 1, indent) or "None", level, indent) + "\n"
python
def dumped(text, level, indent=2): """Put curly brackets round an indented text""" return indented("{\n%s\n}" % indented(text, level + 1, indent) or "None", level, indent) + "\n"
[ "def", "dumped", "(", "text", ",", "level", ",", "indent", "=", "2", ")", ":", "return", "indented", "(", "\"{\\n%s\\n}\"", "%", "indented", "(", "text", ",", "level", "+", "1", ",", "indent", ")", "or", "\"None\"", ",", "level", ",", "indent", ")", "+", "\"\\n\"" ]
Put curly brackets round an indented text
[ "Put", "curly", "brackets", "round", "an", "indented", "text" ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L87-L89
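A doctest-style sketch for the record above; the body is indented one level deeper than the surrounding braces and a trailing newline is appended:

    >>> dumped("name = value", level=0)
    '{\n  name = value\n}\n'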
tjguk/winshell
winshell.py
copy_file
def copy_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file copy. Copying in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_COPY, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
python
def copy_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file copy. Copying in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_COPY, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
[ "def", "copy_file", "(", "source_path", ",", "target_path", ",", "allow_undo", "=", "True", ",", "no_confirm", "=", "False", ",", "rename_on_collision", "=", "True", ",", "silent", "=", "False", ",", "extra_flags", "=", "0", ",", "hWnd", "=", "None", ")", ":", "return", "_file_operation", "(", "shellcon", ".", "FO_COPY", ",", "source_path", ",", "target_path", ",", "allow_undo", ",", "no_confirm", ",", "rename_on_collision", ",", "silent", ",", "extra_flags", ",", "hWnd", ")" ]
Perform a shell-based file copy. Copying in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation.
[ "Perform", "a", "shell", "-", "based", "file", "copy", ".", "Copying", "in", "this", "way", "allows", "the", "possibility", "of", "undo", "auto", "-", "renaming", "and", "showing", "the", "flying", "file", "animation", "during", "the", "copy", "." ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L266-L294
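A Windows-only usage sketch for the record above (winshell drives the shell file-operation API via pywin32); the paths are illustrative:

    # Recycle-bin-aware copy: undoable, rename-on-collision, animated.
    copy_file(r"c:\temp\source.txt", r"c:\temp\target.txt")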
tjguk/winshell
winshell.py
move_file
def move_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file move. Moving in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_MOVE, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
python
def move_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file move. Moving in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_MOVE, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
[ "def", "move_file", "(", "source_path", ",", "target_path", ",", "allow_undo", "=", "True", ",", "no_confirm", "=", "False", ",", "rename_on_collision", "=", "True", ",", "silent", "=", "False", ",", "extra_flags", "=", "0", ",", "hWnd", "=", "None", ")", ":", "return", "_file_operation", "(", "shellcon", ".", "FO_MOVE", ",", "source_path", ",", "target_path", ",", "allow_undo", ",", "no_confirm", ",", "rename_on_collision", ",", "silent", ",", "extra_flags", ",", "hWnd", ")" ]
Perform a shell-based file move. Moving in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation.
[ "Perform", "a", "shell", "-", "based", "file", "move", ".", "Moving", "in", "this", "way", "allows", "the", "possibility", "of", "undo", "auto", "-", "renaming", "and", "showing", "the", "flying", "file", "animation", "during", "the", "copy", "." ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L296-L324
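A sketch for the record above showing the non-interactive variant; the flags mirror copy_file and the paths are illustrative:

    # Move without confirmation dialogs or the progress animation.
    move_file(r"c:\temp\source.txt", r"c:\archive\source.txt",
              no_confirm=True, silent=True)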
tjguk/winshell
winshell.py
rename_file
def rename_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file rename. Renaming in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_RENAME, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
python
def rename_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file rename. Renaming in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_RENAME, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
[ "def", "rename_file", "(", "source_path", ",", "target_path", ",", "allow_undo", "=", "True", ",", "no_confirm", "=", "False", ",", "rename_on_collision", "=", "True", ",", "silent", "=", "False", ",", "extra_flags", "=", "0", ",", "hWnd", "=", "None", ")", ":", "return", "_file_operation", "(", "shellcon", ".", "FO_RENAME", ",", "source_path", ",", "target_path", ",", "allow_undo", ",", "no_confirm", ",", "rename_on_collision", ",", "silent", ",", "extra_flags", ",", "hWnd", ")" ]
Perform a shell-based file rename. Renaming in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation.
[ "Perform", "a", "shell", "-", "based", "file", "rename", ".", "Renaming", "in", "this", "way", "allows", "the", "possibility", "of", "undo", "auto", "-", "renaming", "and", "showing", "the", "flying", "file", "animation", "during", "the", "copy", "." ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L326-L354
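A sketch for the record above; for a rename the target path is the new name rather than a destination folder:

    rename_file(r"c:\temp\source.txt", r"c:\temp\renamed.txt")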
tjguk/winshell
winshell.py
delete_file
def delete_file( source_path, allow_undo=True, no_confirm=False, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file delete. Deleting in this way uses the system recycle bin, allows the possibility of undo, and showing the "flying file" animation during the delete. The default options allow for undo, don't automatically clobber on a name clash and display the animation. """ return _file_operation( shellcon.FO_DELETE, source_path, None, allow_undo, no_confirm, False, silent, extra_flags, hWnd )
python
def delete_file( source_path, allow_undo=True, no_confirm=False, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file delete. Deleting in this way uses the system recycle bin, allows the possibility of undo, and showing the "flying file" animation during the delete. The default options allow for undo, don't automatically clobber on a name clash and display the animation. """ return _file_operation( shellcon.FO_DELETE, source_path, None, allow_undo, no_confirm, False, silent, extra_flags, hWnd )
[ "def", "delete_file", "(", "source_path", ",", "allow_undo", "=", "True", ",", "no_confirm", "=", "False", ",", "silent", "=", "False", ",", "extra_flags", "=", "0", ",", "hWnd", "=", "None", ")", ":", "return", "_file_operation", "(", "shellcon", ".", "FO_DELETE", ",", "source_path", ",", "None", ",", "allow_undo", ",", "no_confirm", ",", "False", ",", "silent", ",", "extra_flags", ",", "hWnd", ")" ]
Perform a shell-based file delete. Deleting in this way uses the system recycle bin, allows the possibility of undo, and showing the "flying file" animation during the delete. The default options allow for undo, don't automatically clobber on a name clash and display the animation.
[ "Perform", "a", "shell", "-", "based", "file", "delete", ".", "Deleting", "in", "this", "way", "uses", "the", "system", "recycle", "bin", "allows", "the", "possibility", "of", "undo", "and", "showing", "the", "flying", "file", "animation", "during", "the", "delete", "." ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L356-L382
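A sketch for the record above; with the default allow_undo=True the file is sent to the Recycle Bin rather than destroyed:

    delete_file(r"c:\temp\source.txt")  # recoverable via the Recycle Bin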
tjguk/winshell
winshell.py
structured_storage
def structured_storage(filename): """Pick out info from MS documents with embedded structured storage(typically MS Word docs etc.) Returns a dictionary of information found """ if not pythoncom.StgIsStorageFile(filename): return {} flags = storagecon.STGM_READ | storagecon.STGM_SHARE_EXCLUSIVE storage = pythoncom.StgOpenStorage(filename, None, flags) try: properties_storage = storage.QueryInterface(pythoncom.IID_IPropertySetStorage) except pythoncom.com_error: return {} property_sheet = properties_storage.Open(FMTID_USER_DEFINED_PROPERTIES) try: data = property_sheet.ReadMultiple(PROPERTIES) finally: property_sheet = None title, subject, author, created_on, keywords, comments, template_used, \ updated_by, edited_on, printed_on, saved_on, \ n_pages, n_words, n_characters, \ application = data result = {} if title: result['title'] = title if subject: result['subject'] = subject if author: result['author'] = author if created_on: result['created_on'] = created_on if keywords: result['keywords'] = keywords if comments: result['comments'] = comments if template_used: result['template_used'] = template_used if updated_by: result['updated_by'] = updated_by if edited_on: result['edited_on'] = edited_on if printed_on: result['printed_on'] = printed_on if saved_on: result['saved_on'] = saved_on if n_pages: result['n_pages'] = n_pages if n_words: result['n_words'] = n_words if n_characters: result['n_characters'] = n_characters if application: result['application'] = application return result
python
def structured_storage(filename): """Pick out info from MS documents with embedded structured storage(typically MS Word docs etc.) Returns a dictionary of information found """ if not pythoncom.StgIsStorageFile(filename): return {} flags = storagecon.STGM_READ | storagecon.STGM_SHARE_EXCLUSIVE storage = pythoncom.StgOpenStorage(filename, None, flags) try: properties_storage = storage.QueryInterface(pythoncom.IID_IPropertySetStorage) except pythoncom.com_error: return {} property_sheet = properties_storage.Open(FMTID_USER_DEFINED_PROPERTIES) try: data = property_sheet.ReadMultiple(PROPERTIES) finally: property_sheet = None title, subject, author, created_on, keywords, comments, template_used, \ updated_by, edited_on, printed_on, saved_on, \ n_pages, n_words, n_characters, \ application = data result = {} if title: result['title'] = title if subject: result['subject'] = subject if author: result['author'] = author if created_on: result['created_on'] = created_on if keywords: result['keywords'] = keywords if comments: result['comments'] = comments if template_used: result['template_used'] = template_used if updated_by: result['updated_by'] = updated_by if edited_on: result['edited_on'] = edited_on if printed_on: result['printed_on'] = printed_on if saved_on: result['saved_on'] = saved_on if n_pages: result['n_pages'] = n_pages if n_words: result['n_words'] = n_words if n_characters: result['n_characters'] = n_characters if application: result['application'] = application return result
[ "def", "structured_storage", "(", "filename", ")", ":", "if", "not", "pythoncom", ".", "StgIsStorageFile", "(", "filename", ")", ":", "return", "{", "}", "flags", "=", "storagecon", ".", "STGM_READ", "|", "storagecon", ".", "STGM_SHARE_EXCLUSIVE", "storage", "=", "pythoncom", ".", "StgOpenStorage", "(", "filename", ",", "None", ",", "flags", ")", "try", ":", "properties_storage", "=", "storage", ".", "QueryInterface", "(", "pythoncom", ".", "IID_IPropertySetStorage", ")", "except", "pythoncom", ".", "com_error", ":", "return", "{", "}", "property_sheet", "=", "properties_storage", ".", "Open", "(", "FMTID_USER_DEFINED_PROPERTIES", ")", "try", ":", "data", "=", "property_sheet", ".", "ReadMultiple", "(", "PROPERTIES", ")", "finally", ":", "property_sheet", "=", "None", "title", ",", "subject", ",", "author", ",", "created_on", ",", "keywords", ",", "comments", ",", "template_used", ",", "updated_by", ",", "edited_on", ",", "printed_on", ",", "saved_on", ",", "n_pages", ",", "n_words", ",", "n_characters", ",", "application", "=", "data", "result", "=", "{", "}", "if", "title", ":", "result", "[", "'title'", "]", "=", "title", "if", "subject", ":", "result", "[", "'subject'", "]", "=", "subject", "if", "author", ":", "result", "[", "'author'", "]", "=", "author", "if", "created_on", ":", "result", "[", "'created_on'", "]", "=", "created_on", "if", "keywords", ":", "result", "[", "'keywords'", "]", "=", "keywords", "if", "comments", ":", "result", "[", "'comments'", "]", "=", "comments", "if", "template_used", ":", "result", "[", "'template_used'", "]", "=", "template_used", "if", "updated_by", ":", "result", "[", "'updated_by'", "]", "=", "updated_by", "if", "edited_on", ":", "result", "[", "'edited_on'", "]", "=", "edited_on", "if", "printed_on", ":", "result", "[", "'printed_on'", "]", "=", "printed_on", "if", "saved_on", ":", "result", "[", "'saved_on'", "]", "=", "saved_on", "if", "n_pages", ":", "result", "[", "'n_pages'", "]", "=", "n_pages", "if", "n_words", ":", "result", "[", "'n_words'", "]", "=", "n_words", "if", "n_characters", ":", "result", "[", "'n_characters'", "]", "=", "n_characters", "if", "application", ":", "result", "[", "'application'", "]", "=", "application", "return", "result" ]
Pick out info from MS documents with embedded structured storage (typically MS Word docs etc.). Returns a dictionary of information found.
[ "Pick", "out", "info", "from", "MS", "documents", "with", "embedded", "structured", "storage", "(", "typically", "MS", "Word", "docs", "etc", ".", ")" ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L586-L645
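A Windows-only sketch for the record above; the path is illustrative, and files without structured storage simply yield an empty dict:

    info = structured_storage(r"c:\docs\report.doc")
    print(info.get("author"), info.get("n_pages"))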
tjguk/winshell
winshell.py
CreateShortcut
def CreateShortcut(Path, Target, Arguments="", StartIn="", Icon=("", 0), Description=""): """Create a Windows shortcut: Path - As what file should the shortcut be created? Target - What command should the desktop use? Arguments - What arguments should be supplied to the command? StartIn - What folder should the command start in? Icon -(filename, index) What icon should be used for the shortcut? Description - What description should the shortcut be given? eg CreateShortcut( Path=os.path.join(desktop(), "PythonI.lnk"), Target=r"c:\python\python.exe", Icon=(r"c:\python\python.exe", 0), Description="Python Interpreter" ) """ lnk = shortcut(Target) lnk.arguments = Arguments lnk.working_directory = StartIn lnk.icon_location = Icon lnk.description = Description lnk.write(Path)
python
def CreateShortcut(Path, Target, Arguments="", StartIn="", Icon=("", 0), Description=""): """Create a Windows shortcut: Path - As what file should the shortcut be created? Target - What command should the desktop use? Arguments - What arguments should be supplied to the command? StartIn - What folder should the command start in? Icon -(filename, index) What icon should be used for the shortcut? Description - What description should the shortcut be given? eg CreateShortcut( Path=os.path.join(desktop(), "PythonI.lnk"), Target=r"c:\python\python.exe", Icon=(r"c:\python\python.exe", 0), Description="Python Interpreter" ) """ lnk = shortcut(Target) lnk.arguments = Arguments lnk.working_directory = StartIn lnk.icon_location = Icon lnk.description = Description lnk.write(Path)
[ "def", "CreateShortcut", "(", "Path", ",", "Target", ",", "Arguments", "=", "\"\"", ",", "StartIn", "=", "\"\"", ",", "Icon", "=", "(", "\"\"", ",", "0", ")", ",", "Description", "=", "\"\"", ")", ":", "lnk", "=", "shortcut", "(", "Target", ")", "lnk", ".", "arguments", "=", "Arguments", "lnk", ".", "working_directory", "=", "StartIn", "lnk", ".", "icon_location", "=", "Icon", "lnk", ".", "description", "=", "Description", "lnk", ".", "write", "(", "Path", ")" ]
Create a Windows shortcut: Path - As what file should the shortcut be created? Target - What command should the desktop use? Arguments - What arguments should be supplied to the command? StartIn - What folder should the command start in? Icon -(filename, index) What icon should be used for the shortcut? Description - What description should the shortcut be given? eg CreateShortcut( Path=os.path.join(desktop(), "PythonI.lnk"), Target=r"c:\python\python.exe", Icon=(r"c:\python\python.exe", 0), Description="Python Interpreter" )
[ "Create", "a", "Windows", "shortcut", ":" ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L1084-L1107
tjguk/winshell
winshell.py
ShellRecycleBin.undelete
def undelete(self, original_filepath): """Restore the most recent version of a filepath, returning the filepath it was restored to(as rename-on-collision will apply if a file already exists at that path). """ candidates = self.versions(original_filepath) if not candidates: raise x_not_found_in_recycle_bin("%s not found in the Recycle Bin" % original_filepath) # # NB Can't use max(key=...) until Python 2.6+ # newest = sorted(candidates, key=lambda entry: entry.recycle_date())[-1] return newest.undelete()
python
def undelete(self, original_filepath): """Restore the most recent version of a filepath, returning the filepath it was restored to(as rename-on-collision will apply if a file already exists at that path). """ candidates = self.versions(original_filepath) if not candidates: raise x_not_found_in_recycle_bin("%s not found in the Recycle Bin" % original_filepath) # # NB Can't use max(key=...) until Python 2.6+ # newest = sorted(candidates, key=lambda entry: entry.recycle_date())[-1] return newest.undelete()
[ "def", "undelete", "(", "self", ",", "original_filepath", ")", ":", "candidates", "=", "self", ".", "versions", "(", "original_filepath", ")", "if", "not", "candidates", ":", "raise", "x_not_found_in_recycle_bin", "(", "\"%s not found in the Recycle Bin\"", "%", "original_filepath", ")", "#", "# NB Can't use max(key=...) until Python 2.6+", "#", "newest", "=", "sorted", "(", "candidates", ",", "key", "=", "lambda", "entry", ":", "entry", ".", "recycle_date", "(", ")", ")", "[", "-", "1", "]", "return", "newest", ".", "undelete", "(", ")" ]
Restore the most recent version of a filepath, returning the filepath it was restored to (as rename-on-collision will apply if a file already exists at that path).
[ "Restore", "the", "most", "recent", "version", "of", "a", "filepath", "returning", "the", "filepath", "it", "was", "restored", "to", "(", "as", "rename", "-", "on", "-", "collision", "will", "apply", "if", "a", "file", "already", "exists", "at", "that", "path", ")", "." ]
train
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L937-L949
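A hedged sketch for the record above, assuming winshell.recycle_bin() returns a ShellRecycleBin instance as elsewhere in this module:

    import winshell

    rbin = winshell.recycle_bin()
    # Restores the newest deleted version; returns the path restored to,
    # which may differ from the original if rename-on-collision applied.
    restored_path = rbin.undelete(r"c:\temp\source.txt")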
ska-sa/montblanc
montblanc/impl/rime/tensorflow/queue_wrapper.py
_get_queue_types
def _get_queue_types(fed_arrays, data_sources): """ Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary """ try: return [data_sources[n].dtype for n in fed_arrays] except KeyError as e: raise ValueError("Array '{k}' has no data source!" .format(k=e.message)), None, sys.exc_info()[2]
python
def _get_queue_types(fed_arrays, data_sources): """ Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary """ try: return [data_sources[n].dtype for n in fed_arrays] except KeyError as e: raise ValueError("Array '{k}' has no data source!" .format(k=e.message)), None, sys.exc_info()[2]
[ "def", "_get_queue_types", "(", "fed_arrays", ",", "data_sources", ")", ":", "try", ":", "return", "[", "data_sources", "[", "n", "]", ".", "dtype", "for", "n", "in", "fed_arrays", "]", "except", "KeyError", "as", "e", ":", "raise", "ValueError", "(", "\"Array '{k}' has no data source!\"", ".", "format", "(", "k", "=", "e", ".", "message", ")", ")", ",", "None", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]" ]
Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary
[ "Given", "a", "list", "of", "arrays", "to", "feed", "in", "fed_arrays", "return", "a", "list", "of", "associated", "queue", "types", "obtained", "from", "tuples", "in", "the", "data_sources", "dictionary" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/queue_wrapper.py#L8-L18
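A hedged sketch for the record above; DataSource is a stand-in for the (source, dtype) tuples the surrounding module keys on array names, since only the .dtype attribute is read here:

    from collections import namedtuple
    import numpy as np

    DataSource = namedtuple("DataSource", ["source", "dtype"])
    data_sources = {"uvw": DataSource(lambda ctx: None, np.float64)}

    _get_queue_types(["uvw"], data_sources)   # [numpy.float64]
    _get_queue_types(["bad"], data_sources)   # ValueError: no data source!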
ska-sa/montblanc
montblanc/impl/rime/tensorflow/queue_wrapper.py
create_queue_wrapper
def create_queue_wrapper(name, queue_size, fed_arrays, data_sources, *args, **kwargs): """ Arguments name: string Name of the queue queue_size: integer Size of the queue fed_arrays: list array names that will be fed by this queue data_sources: dict (lambda/method, dtype) tuples, keyed on array names """ qtype = SingleInputMultiQueueWrapper if 'count' in kwargs else QueueWrapper return qtype(name, queue_size, fed_arrays, data_sources, *args, **kwargs)
python
def create_queue_wrapper(name, queue_size, fed_arrays, data_sources, *args, **kwargs): """ Arguments name: string Name of the queue queue_size: integer Size of the queue fed_arrays: list array names that will be fed by this queue data_sources: dict (lambda/method, dtype) tuples, keyed on array names """ qtype = SingleInputMultiQueueWrapper if 'count' in kwargs else QueueWrapper return qtype(name, queue_size, fed_arrays, data_sources, *args, **kwargs)
[ "def", "create_queue_wrapper", "(", "name", ",", "queue_size", ",", "fed_arrays", ",", "data_sources", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "qtype", "=", "SingleInputMultiQueueWrapper", "if", "'count'", "in", "kwargs", "else", "QueueWrapper", "return", "qtype", "(", "name", ",", "queue_size", ",", "fed_arrays", ",", "data_sources", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Arguments name: string Name of the queue queue_size: integer Size of the queue fed_arrays: list array names that will be fed by this queue data_sources: dict (lambda/method, dtype) tuples, keyed on array names
[ "Arguments", "name", ":", "string", "Name", "of", "the", "queue", "queue_size", ":", "integer", "Size", "of", "the", "queue", "fed_arrays", ":", "list", "array", "names", "that", "will", "be", "fed", "by", "this", "queue", "data_sources", ":", "dict", "(", "lambda", "/", "method", "dtype", ")", "tuples", "keyed", "on", "array", "names" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/queue_wrapper.py#L152-L167
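A hedged sketch of the dispatch in the record above; the wrapper classes construct tensorflow queues internally, so this only illustrates how the count keyword selects the wrapper type:

    # No 'count' kwarg: a plain QueueWrapper is built.
    qw = create_queue_wrapper("feed", 10, ["uvw"], data_sources)
    # With 'count': a SingleInputMultiQueueWrapper is built instead.
    mqw = create_queue_wrapper("feed", 10, ["uvw"], data_sources, count=4)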
ska-sa/montblanc
montblanc/util/parsing.py
parse_python_assigns
def parse_python_assigns(assign_str): """ Parses a string, containing assign statements into a dictionary. .. code-block:: python h5 = katdal.open('123456789.h5') kwargs = parse_python_assigns("spw=3; scans=[1,2];" "targets='bpcal,radec';" "channels=slice(0,2048)") h5.select(**kwargs) Parameters ---------- assign_str: str Assignment string. Should only contain assignment statements assigning python literals or builtin function calls, to variable names. Multiple assignment statements should be separated by semi-colons. Returns ------- dict Dictionary { name: value } containing assignment results. """ if not assign_str: return {} def _eval_value(stmt_value): # If the statement value is a call to a builtin, try evaluate it if isinstance(stmt_value, ast.Call): func_name = stmt_value.func.id if func_name not in _BUILTIN_WHITELIST: raise ValueError("Function '%s' in '%s' is not builtin. " "Available builtins: '%s'" % (func_name, assign_str, list(_BUILTIN_WHITELIST))) # Recursively pass arguments through this same function if stmt_value.args is not None: args = tuple(_eval_value(a) for a in stmt_value.args) else: args = () # Recursively pass keyword arguments through this same function if stmt_value.keywords is not None: kwargs = {kw.arg : _eval_value(kw.value) for kw in stmt_value.keywords} else: kwargs = {} return getattr(__builtin__, func_name)(*args, **kwargs) # Try a literal eval else: return ast.literal_eval(stmt_value) # Variable dictionary variables = {} # Parse the assignment string stmts = ast.parse(assign_str, mode='single').body for i, stmt in enumerate(stmts): if not isinstance(stmt, ast.Assign): raise ValueError("Statement %d in '%s' is not a " "variable assignment." % (i, assign_str)) # Evaluate assignment lhs values = _eval_value(stmt.value) # "a = b = c" => targets 'a' and 'b' with 'c' as result for target in stmt.targets: # a = 2 if isinstance(target, ast.Name): variables[target.id] = values # Tuple/List unpacking case # (a, b) = 2 elif isinstance(target, (ast.Tuple, ast.List)): # Require all tuple/list elements to be variable names, # although anything else is probably a syntax error if not all(isinstance(e, ast.Name) for e in target.elts): raise ValueError("Tuple unpacking in assignment %d " "in expression '%s' failed as not all " "tuple contents are variable names.") # Promote for zip and length checking if not isinstance(values, (tuple, list)): elements = (values,) else: elements = values if not len(target.elts) == len(elements): raise ValueError("Unpacking '%s' into a tuple/list in " "assignment %d of expression '%s' failed. " "The number of tuple elements did not match " "the number of values." % (values, i, assign_str)) # Unpack for variable, value in zip(target.elts, elements): variables[variable.id] = value else: raise TypeError("'%s' types are not supported" "as assignment targets." % type(target)) return variables
python
def parse_python_assigns(assign_str): """ Parses a string, containing assign statements into a dictionary. .. code-block:: python h5 = katdal.open('123456789.h5') kwargs = parse_python_assigns("spw=3; scans=[1,2];" "targets='bpcal,radec';" "channels=slice(0,2048)") h5.select(**kwargs) Parameters ---------- assign_str: str Assignment string. Should only contain assignment statements assigning python literals or builtin function calls, to variable names. Multiple assignment statements should be separated by semi-colons. Returns ------- dict Dictionary { name: value } containing assignment results. """ if not assign_str: return {} def _eval_value(stmt_value): # If the statement value is a call to a builtin, try evaluate it if isinstance(stmt_value, ast.Call): func_name = stmt_value.func.id if func_name not in _BUILTIN_WHITELIST: raise ValueError("Function '%s' in '%s' is not builtin. " "Available builtins: '%s'" % (func_name, assign_str, list(_BUILTIN_WHITELIST))) # Recursively pass arguments through this same function if stmt_value.args is not None: args = tuple(_eval_value(a) for a in stmt_value.args) else: args = () # Recursively pass keyword arguments through this same function if stmt_value.keywords is not None: kwargs = {kw.arg : _eval_value(kw.value) for kw in stmt_value.keywords} else: kwargs = {} return getattr(__builtin__, func_name)(*args, **kwargs) # Try a literal eval else: return ast.literal_eval(stmt_value) # Variable dictionary variables = {} # Parse the assignment string stmts = ast.parse(assign_str, mode='single').body for i, stmt in enumerate(stmts): if not isinstance(stmt, ast.Assign): raise ValueError("Statement %d in '%s' is not a " "variable assignment." % (i, assign_str)) # Evaluate assignment lhs values = _eval_value(stmt.value) # "a = b = c" => targets 'a' and 'b' with 'c' as result for target in stmt.targets: # a = 2 if isinstance(target, ast.Name): variables[target.id] = values # Tuple/List unpacking case # (a, b) = 2 elif isinstance(target, (ast.Tuple, ast.List)): # Require all tuple/list elements to be variable names, # although anything else is probably a syntax error if not all(isinstance(e, ast.Name) for e in target.elts): raise ValueError("Tuple unpacking in assignment %d " "in expression '%s' failed as not all " "tuple contents are variable names.") # Promote for zip and length checking if not isinstance(values, (tuple, list)): elements = (values,) else: elements = values if not len(target.elts) == len(elements): raise ValueError("Unpacking '%s' into a tuple/list in " "assignment %d of expression '%s' failed. " "The number of tuple elements did not match " "the number of values." % (values, i, assign_str)) # Unpack for variable, value in zip(target.elts, elements): variables[variable.id] = value else: raise TypeError("'%s' types are not supported" "as assignment targets." % type(target)) return variables
[ "def", "parse_python_assigns", "(", "assign_str", ")", ":", "if", "not", "assign_str", ":", "return", "{", "}", "def", "_eval_value", "(", "stmt_value", ")", ":", "# If the statement value is a call to a builtin, try evaluate it", "if", "isinstance", "(", "stmt_value", ",", "ast", ".", "Call", ")", ":", "func_name", "=", "stmt_value", ".", "func", ".", "id", "if", "func_name", "not", "in", "_BUILTIN_WHITELIST", ":", "raise", "ValueError", "(", "\"Function '%s' in '%s' is not builtin. \"", "\"Available builtins: '%s'\"", "%", "(", "func_name", ",", "assign_str", ",", "list", "(", "_BUILTIN_WHITELIST", ")", ")", ")", "# Recursively pass arguments through this same function", "if", "stmt_value", ".", "args", "is", "not", "None", ":", "args", "=", "tuple", "(", "_eval_value", "(", "a", ")", "for", "a", "in", "stmt_value", ".", "args", ")", "else", ":", "args", "=", "(", ")", "# Recursively pass keyword arguments through this same function", "if", "stmt_value", ".", "keywords", "is", "not", "None", ":", "kwargs", "=", "{", "kw", ".", "arg", ":", "_eval_value", "(", "kw", ".", "value", ")", "for", "kw", "in", "stmt_value", ".", "keywords", "}", "else", ":", "kwargs", "=", "{", "}", "return", "getattr", "(", "__builtin__", ",", "func_name", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Try a literal eval", "else", ":", "return", "ast", ".", "literal_eval", "(", "stmt_value", ")", "# Variable dictionary", "variables", "=", "{", "}", "# Parse the assignment string", "stmts", "=", "ast", ".", "parse", "(", "assign_str", ",", "mode", "=", "'single'", ")", ".", "body", "for", "i", ",", "stmt", "in", "enumerate", "(", "stmts", ")", ":", "if", "not", "isinstance", "(", "stmt", ",", "ast", ".", "Assign", ")", ":", "raise", "ValueError", "(", "\"Statement %d in '%s' is not a \"", "\"variable assignment.\"", "%", "(", "i", ",", "assign_str", ")", ")", "# Evaluate assignment lhs", "values", "=", "_eval_value", "(", "stmt", ".", "value", ")", "# \"a = b = c\" => targets 'a' and 'b' with 'c' as result", "for", "target", "in", "stmt", ".", "targets", ":", "# a = 2", "if", "isinstance", "(", "target", ",", "ast", ".", "Name", ")", ":", "variables", "[", "target", ".", "id", "]", "=", "values", "# Tuple/List unpacking case", "# (a, b) = 2", "elif", "isinstance", "(", "target", ",", "(", "ast", ".", "Tuple", ",", "ast", ".", "List", ")", ")", ":", "# Require all tuple/list elements to be variable names,", "# although anything else is probably a syntax error", "if", "not", "all", "(", "isinstance", "(", "e", ",", "ast", ".", "Name", ")", "for", "e", "in", "target", ".", "elts", ")", ":", "raise", "ValueError", "(", "\"Tuple unpacking in assignment %d \"", "\"in expression '%s' failed as not all \"", "\"tuple contents are variable names.\"", ")", "# Promote for zip and length checking", "if", "not", "isinstance", "(", "values", ",", "(", "tuple", ",", "list", ")", ")", ":", "elements", "=", "(", "values", ",", ")", "else", ":", "elements", "=", "values", "if", "not", "len", "(", "target", ".", "elts", ")", "==", "len", "(", "elements", ")", ":", "raise", "ValueError", "(", "\"Unpacking '%s' into a tuple/list in \"", "\"assignment %d of expression '%s' failed. 
\"", "\"The number of tuple elements did not match \"", "\"the number of values.\"", "%", "(", "values", ",", "i", ",", "assign_str", ")", ")", "# Unpack", "for", "variable", ",", "value", "in", "zip", "(", "target", ".", "elts", ",", "elements", ")", ":", "variables", "[", "variable", ".", "id", "]", "=", "value", "else", ":", "raise", "TypeError", "(", "\"'%s' types are not supported\"", "\"as assignment targets.\"", "%", "type", "(", "target", ")", ")", "return", "variables" ]
Parses a string containing assign statements into a dictionary. .. code-block:: python h5 = katdal.open('123456789.h5') kwargs = parse_python_assigns("spw=3; scans=[1,2];" "targets='bpcal,radec';" "channels=slice(0,2048)") h5.select(**kwargs) Parameters ---------- assign_str: str Assignment string. Should only contain assignment statements assigning python literals or builtin function calls to variable names. Multiple assignment statements should be separated by semi-colons. Returns ------- dict Dictionary { name: value } containing assignment results.
[ "Parses", "a", "string", "containing", "assign", "statements", "into", "a", "dictionary", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/parsing.py#L12-L122
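A further doctest-style sketch for the record above, exercising tuple unpacking and a builtin call (slice appears in the function's own example, so it is assumed to be whitelisted); indexing the result avoids relying on dict ordering:

    >>> result = parse_python_assigns("a, b = (2, 3); s = slice(0, 8)")
    >>> result["a"], result["b"], result["s"]
    (2, 3, slice(0, 8, None))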
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sinks/sink_provider.py
find_sinks
def find_sinks(obj): """ Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. """ SINK_ARGSPEC = ['self', 'context'] return { n: m for n, m in inspect.getmembers(obj, inspect.ismethod) if inspect.getargspec(m)[0] == SINK_ARGSPEC }
python
def find_sinks(obj): """ Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. """ SINK_ARGSPEC = ['self', 'context'] return { n: m for n, m in inspect.getmembers(obj, inspect.ismethod) if inspect.getargspec(m)[0] == SINK_ARGSPEC }
[ "def", "find_sinks", "(", "obj", ")", ":", "SINK_ARGSPEC", "=", "[", "'self'", ",", "'context'", "]", "return", "{", "n", ":", "m", "for", "n", ",", "m", "in", "inspect", ".", "getmembers", "(", "obj", ",", "inspect", ".", "ismethod", ")", "if", "inspect", ".", "getargspec", "(", "m", ")", "[", "0", "]", "==", "SINK_ARGSPEC", "}" ]
Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not.
[ "Returns", "a", "dictionary", "of", "sink", "methods", "found", "on", "this", "object", "keyed", "on", "method", "name", ".", "Sink", "methods", "are", "identified", "by", "(", "self", "context", ")", "arguments", "on", "this", "object", ".", "For", "example", ":" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sinks/sink_provider.py#L53-L73
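A sketch for the record above; only methods whose argspec is exactly (self, context) are picked up:

    class MySink(object):
        def model_vis(self, context):   # matches the sink argspec: found
            pass
        def helper(self, ctx):          # argument named 'ctx': ignored
            pass

    find_sinks(MySink())   # {'model_vis': <bound method ...>}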
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sinks/sink_provider.py
SinkProvider.sinks
def sinks(self): """ Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. """ try: return self._sinks except AttributeError: self._sinks = find_sinks(self) return self._sinks
python
def sinks(self): """ Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. """ try: return self._sinks except AttributeError: self._sinks = find_sinks(self) return self._sinks
[ "def", "sinks", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_sinks", "except", "AttributeError", ":", "self", ".", "_sinks", "=", "find_sinks", "(", "self", ")", "return", "self", ".", "_sinks" ]
Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not.
[ "Returns", "a", "dictionary", "of", "sink", "methods", "found", "on", "this", "object", "keyed", "on", "method", "name", ".", "Sink", "methods", "are", "identified", "by", "(", "self", "context", ")", "arguments", "on", "this", "object", ".", "For", "example", ":" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sinks/sink_provider.py#L97-L120
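A brief note on the record above: the first call computes find_sinks(self) and caches the result on the instance, so repeated calls are cheap:

    provider = MySink()          # assuming MySink derives from SinkProvider
    provider.sinks() is provider.sinks()   # True: cached after first call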
ska-sa/montblanc
montblanc/util/ant_uvw.py
_antenna_uvw
def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna): """ numba implementation of antenna_uvw """ if antenna1.ndim != 1: raise ValueError("antenna1 shape should be (row,)") if antenna2.ndim != 1: raise ValueError("antenna2 shape should be (row,)") if uvw.ndim != 2 or uvw.shape[1] != 3: raise ValueError("uvw shape should be (row, 3)") if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]): raise ValueError("First dimension of uvw, antenna1 " "and antenna2 do not match") if chunks.ndim != 1: raise ValueError("chunks shape should be (utime,)") if nr_of_antenna < 1: raise ValueError("nr_of_antenna < 1") ant_uvw_shape = (chunks.shape[0], nr_of_antenna, 3) antenna_uvw = np.full(ant_uvw_shape, np.nan, dtype=uvw.dtype) start = 0 for ci, chunk in enumerate(chunks): end = start + chunk # one pass should be enough! _antenna_uvw_loop(uvw, antenna1, antenna2, antenna_uvw, ci, start, end) start = end return antenna_uvw
python
def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna): """ numba implementation of antenna_uvw """ if antenna1.ndim != 1: raise ValueError("antenna1 shape should be (row,)") if antenna2.ndim != 1: raise ValueError("antenna2 shape should be (row,)") if uvw.ndim != 2 or uvw.shape[1] != 3: raise ValueError("uvw shape should be (row, 3)") if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]): raise ValueError("First dimension of uvw, antenna1 " "and antenna2 do not match") if chunks.ndim != 1: raise ValueError("chunks shape should be (utime,)") if nr_of_antenna < 1: raise ValueError("nr_of_antenna < 1") ant_uvw_shape = (chunks.shape[0], nr_of_antenna, 3) antenna_uvw = np.full(ant_uvw_shape, np.nan, dtype=uvw.dtype) start = 0 for ci, chunk in enumerate(chunks): end = start + chunk # one pass should be enough! _antenna_uvw_loop(uvw, antenna1, antenna2, antenna_uvw, ci, start, end) start = end return antenna_uvw
[ "def", "_antenna_uvw", "(", "uvw", ",", "antenna1", ",", "antenna2", ",", "chunks", ",", "nr_of_antenna", ")", ":", "if", "antenna1", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"antenna1 shape should be (row,)\"", ")", "if", "antenna2", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"antenna2 shape should be (row,)\"", ")", "if", "uvw", ".", "ndim", "!=", "2", "or", "uvw", ".", "shape", "[", "1", "]", "!=", "3", ":", "raise", "ValueError", "(", "\"uvw shape should be (row, 3)\"", ")", "if", "not", "(", "uvw", ".", "shape", "[", "0", "]", "==", "antenna1", ".", "shape", "[", "0", "]", "==", "antenna2", ".", "shape", "[", "0", "]", ")", ":", "raise", "ValueError", "(", "\"First dimension of uvw, antenna1 \"", "\"and antenna2 do not match\"", ")", "if", "chunks", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"chunks shape should be (utime,)\"", ")", "if", "nr_of_antenna", "<", "1", ":", "raise", "ValueError", "(", "\"nr_of_antenna < 1\"", ")", "ant_uvw_shape", "=", "(", "chunks", ".", "shape", "[", "0", "]", ",", "nr_of_antenna", ",", "3", ")", "antenna_uvw", "=", "np", ".", "full", "(", "ant_uvw_shape", ",", "np", ".", "nan", ",", "dtype", "=", "uvw", ".", "dtype", ")", "start", "=", "0", "for", "ci", ",", "chunk", "in", "enumerate", "(", "chunks", ")", ":", "end", "=", "start", "+", "chunk", "# one pass should be enough!", "_antenna_uvw_loop", "(", "uvw", ",", "antenna1", ",", "antenna2", ",", "antenna_uvw", ",", "ci", ",", "start", ",", "end", ")", "start", "=", "end", "return", "antenna_uvw" ]
numba implementation of antenna_uvw
[ "numba", "implementation", "of", "antenna_uvw" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/ant_uvw.py#L86-L121
ska-sa/montblanc
montblanc/util/ant_uvw.py
_raise_decomposition_errors
def _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err): """ Raises informative exception for an invalid decomposition """ start = 0 problem_str = [] for ci, chunk in enumerate(chunks): end = start + chunk ant1 = antenna1[start:end] ant2 = antenna2[start:end] cuvw = uvw[start:end] ant1_uvw = ant_uvw[ci, ant1, :] ant2_uvw = ant_uvw[ci, ant2, :] ruvw = ant2_uvw - ant1_uvw # Identifty rows where any of the UVW components differed close = np.isclose(ruvw, cuvw) problems = np.nonzero(np.logical_or.reduce(np.invert(close), axis=1)) for row in problems[0]: problem_str.append("[row %d [%d, %d] (chunk %d)]: " "original %s recovered %s " "ant1 %s ant2 %s" % ( start+row, ant1[row], ant2[row], ci, cuvw[row], ruvw[row], ant1_uvw[row], ant2_uvw[row])) # Exit inner loop early if len(problem_str) >= max_err: break # Exit outer loop early if len(problem_str) >= max_err: break start = end # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna UVW Decomposition Failed", "The following differences were found " "(first 100):"] + problem_str raise AntennaUVWDecompositionError('\n'.join(problem_str))
python
def _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err): """ Raises informative exception for an invalid decomposition """ start = 0 problem_str = [] for ci, chunk in enumerate(chunks): end = start + chunk ant1 = antenna1[start:end] ant2 = antenna2[start:end] cuvw = uvw[start:end] ant1_uvw = ant_uvw[ci, ant1, :] ant2_uvw = ant_uvw[ci, ant2, :] ruvw = ant2_uvw - ant1_uvw # Identifty rows where any of the UVW components differed close = np.isclose(ruvw, cuvw) problems = np.nonzero(np.logical_or.reduce(np.invert(close), axis=1)) for row in problems[0]: problem_str.append("[row %d [%d, %d] (chunk %d)]: " "original %s recovered %s " "ant1 %s ant2 %s" % ( start+row, ant1[row], ant2[row], ci, cuvw[row], ruvw[row], ant1_uvw[row], ant2_uvw[row])) # Exit inner loop early if len(problem_str) >= max_err: break # Exit outer loop early if len(problem_str) >= max_err: break start = end # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna UVW Decomposition Failed", "The following differences were found " "(first 100):"] + problem_str raise AntennaUVWDecompositionError('\n'.join(problem_str))
[ "def", "_raise_decomposition_errors", "(", "uvw", ",", "antenna1", ",", "antenna2", ",", "chunks", ",", "ant_uvw", ",", "max_err", ")", ":", "start", "=", "0", "problem_str", "=", "[", "]", "for", "ci", ",", "chunk", "in", "enumerate", "(", "chunks", ")", ":", "end", "=", "start", "+", "chunk", "ant1", "=", "antenna1", "[", "start", ":", "end", "]", "ant2", "=", "antenna2", "[", "start", ":", "end", "]", "cuvw", "=", "uvw", "[", "start", ":", "end", "]", "ant1_uvw", "=", "ant_uvw", "[", "ci", ",", "ant1", ",", ":", "]", "ant2_uvw", "=", "ant_uvw", "[", "ci", ",", "ant2", ",", ":", "]", "ruvw", "=", "ant2_uvw", "-", "ant1_uvw", "# Identifty rows where any of the UVW components differed", "close", "=", "np", ".", "isclose", "(", "ruvw", ",", "cuvw", ")", "problems", "=", "np", ".", "nonzero", "(", "np", ".", "logical_or", ".", "reduce", "(", "np", ".", "invert", "(", "close", ")", ",", "axis", "=", "1", ")", ")", "for", "row", "in", "problems", "[", "0", "]", ":", "problem_str", ".", "append", "(", "\"[row %d [%d, %d] (chunk %d)]: \"", "\"original %s recovered %s \"", "\"ant1 %s ant2 %s\"", "%", "(", "start", "+", "row", ",", "ant1", "[", "row", "]", ",", "ant2", "[", "row", "]", ",", "ci", ",", "cuvw", "[", "row", "]", ",", "ruvw", "[", "row", "]", ",", "ant1_uvw", "[", "row", "]", ",", "ant2_uvw", "[", "row", "]", ")", ")", "# Exit inner loop early", "if", "len", "(", "problem_str", ")", ">=", "max_err", ":", "break", "# Exit outer loop early", "if", "len", "(", "problem_str", ")", ">=", "max_err", ":", "break", "start", "=", "end", "# Return early if nothing was wrong", "if", "len", "(", "problem_str", ")", "==", "0", ":", "return", "# Add a preamble and raise exception", "problem_str", "=", "[", "\"Antenna UVW Decomposition Failed\"", ",", "\"The following differences were found \"", "\"(first 100):\"", "]", "+", "problem_str", "raise", "AntennaUVWDecompositionError", "(", "'\\n'", ".", "join", "(", "problem_str", ")", ")" ]
Raises an informative exception for an invalid decomposition
[ "Raises", "informative", "exception", "for", "an", "invalid", "decomposition" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/ant_uvw.py#L128-L177
ska-sa/montblanc
montblanc/util/ant_uvw.py
_raise_missing_antenna_errors
def _raise_missing_antenna_errors(ant_uvw, max_err): """ Raises an informative error for missing antenna """ # Find antenna uvw coordinates where any UVW component was nan # nan + real == nan problems = np.nonzero(np.add.reduce(np.isnan(ant_uvw), axis=2)) problem_str = [] for c, a in zip(*problems): problem_str.append("[chunk %d antenna %d]" % (c, a)) # Exit early if len(problem_str) >= max_err: break # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna were missing"] + problem_str raise AntennaMissingError('\n'.join(problem_str))
python
def _raise_missing_antenna_errors(ant_uvw, max_err): """ Raises an informative error for missing antenna """ # Find antenna uvw coordinates where any UVW component was nan # nan + real == nan problems = np.nonzero(np.add.reduce(np.isnan(ant_uvw), axis=2)) problem_str = [] for c, a in zip(*problems): problem_str.append("[chunk %d antenna %d]" % (c, a)) # Exit early if len(problem_str) >= max_err: break # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna were missing"] + problem_str raise AntennaMissingError('\n'.join(problem_str))
[ "def", "_raise_missing_antenna_errors", "(", "ant_uvw", ",", "max_err", ")", ":", "# Find antenna uvw coordinates where any UVW component was nan", "# nan + real == nan", "problems", "=", "np", ".", "nonzero", "(", "np", ".", "add", ".", "reduce", "(", "np", ".", "isnan", "(", "ant_uvw", ")", ",", "axis", "=", "2", ")", ")", "problem_str", "=", "[", "]", "for", "c", ",", "a", "in", "zip", "(", "*", "problems", ")", ":", "problem_str", ".", "append", "(", "\"[chunk %d antenna %d]\"", "%", "(", "c", ",", "a", ")", ")", "# Exit early", "if", "len", "(", "problem_str", ")", ">=", "max_err", ":", "break", "# Return early if nothing was wrong", "if", "len", "(", "problem_str", ")", "==", "0", ":", "return", "# Add a preamble and raise exception", "problem_str", "=", "[", "\"Antenna were missing\"", "]", "+", "problem_str", "raise", "AntennaMissingError", "(", "'\\n'", ".", "join", "(", "problem_str", ")", ")" ]
Raises an informative error for missing antenna
[ "Raises", "an", "informative", "error", "for", "missing", "antenna" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/ant_uvw.py#L184-L205
ska-sa/montblanc
montblanc/util/ant_uvw.py
antenna_uvw
def antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna, check_missing=False, check_decomposition=False, max_err=100): """ Computes per-antenna UVW coordinates from baseline ``uvw``, ``antenna1`` and ``antenna2`` coordinates logically grouped into baseline chunks. The example below illustrates two baseline chunks of size 6 and 5, respectively. .. code-block:: python uvw = ... ant1 = np.array([0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1], dtype=np.int32) ant2 = np.array([1, 2, 3, 2, 3, 3, 1, 2, 3, 1, 2], dtype=np.int32) chunks = np.array([6, 5], dtype=np.int32) ant_uv = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4) The first antenna of the first baseline of a chunk is chosen as the origin of the antenna coordinate system, while the second antenna is set to the negative of the baseline UVW coordinate. Subsequent antenna UVW coordinates are iteratively derived from the first two coordinates. Thus, the baseline indices need not be properly ordered (within the chunk). If it is not possible to derive coordinates for an antenna, it's coordinate will be set to nan. Parameters ---------- uvw : np.ndarray Baseline UVW coordinates of shape (row, 3) antenna1 : np.ndarray Baseline first antenna of shape (row,) antenna2 : np.ndarray Baseline second antenna of shape (row,) chunks : np.ndarray Number of baselines per unique timestep with shape (chunks,) :code:`np.sum(chunks) == row` should hold. nr_of_antenna : int Total number of antenna in the solution. check_missing (optional) : bool If ``True`` raises an exception if it was not possible to compute UVW coordinates for all antenna (i.e. some were nan). Defaults to ``False``. check_decomposition (optional) : bool If ``True``, checks that the antenna decomposition accurately reproduces the coordinates in ``uvw``, or that :code:`ant_uvw[c,ant1,:] - ant_uvw[c,ant2,:] == uvw[s:e,:]` where ``s`` and ``e`` are the start and end rows of chunk ``c`` respectively. Defaults to ``False``. max_err (optional) : integer Maximum numbers of errors when checking for missing antenna or innacurate decompositions. Defaults to ``100``. Returns ------- np.ndarray Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3) """ ant_uvw = _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna) if check_missing: _raise_missing_antenna_errors(ant_uvw, max_err=max_err) if check_decomposition: _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err=max_err) return ant_uvw
python
def antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna, check_missing=False, check_decomposition=False, max_err=100): """ Computes per-antenna UVW coordinates from baseline ``uvw``, ``antenna1`` and ``antenna2`` coordinates logically grouped into baseline chunks. The example below illustrates two baseline chunks of size 6 and 5, respectively. .. code-block:: python uvw = ... ant1 = np.array([0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1], dtype=np.int32) ant2 = np.array([1, 2, 3, 2, 3, 3, 1, 2, 3, 1, 2], dtype=np.int32) chunks = np.array([6, 5], dtype=np.int32) ant_uv = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4) The first antenna of the first baseline of a chunk is chosen as the origin of the antenna coordinate system, while the second antenna is set to the negative of the baseline UVW coordinate. Subsequent antenna UVW coordinates are iteratively derived from the first two coordinates. Thus, the baseline indices need not be properly ordered (within the chunk). If it is not possible to derive coordinates for an antenna, it's coordinate will be set to nan. Parameters ---------- uvw : np.ndarray Baseline UVW coordinates of shape (row, 3) antenna1 : np.ndarray Baseline first antenna of shape (row,) antenna2 : np.ndarray Baseline second antenna of shape (row,) chunks : np.ndarray Number of baselines per unique timestep with shape (chunks,) :code:`np.sum(chunks) == row` should hold. nr_of_antenna : int Total number of antenna in the solution. check_missing (optional) : bool If ``True`` raises an exception if it was not possible to compute UVW coordinates for all antenna (i.e. some were nan). Defaults to ``False``. check_decomposition (optional) : bool If ``True``, checks that the antenna decomposition accurately reproduces the coordinates in ``uvw``, or that :code:`ant_uvw[c,ant1,:] - ant_uvw[c,ant2,:] == uvw[s:e,:]` where ``s`` and ``e`` are the start and end rows of chunk ``c`` respectively. Defaults to ``False``. max_err (optional) : integer Maximum numbers of errors when checking for missing antenna or innacurate decompositions. Defaults to ``100``. Returns ------- np.ndarray Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3) """ ant_uvw = _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna) if check_missing: _raise_missing_antenna_errors(ant_uvw, max_err=max_err) if check_decomposition: _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err=max_err) return ant_uvw
[ "def", "antenna_uvw", "(", "uvw", ",", "antenna1", ",", "antenna2", ",", "chunks", ",", "nr_of_antenna", ",", "check_missing", "=", "False", ",", "check_decomposition", "=", "False", ",", "max_err", "=", "100", ")", ":", "ant_uvw", "=", "_antenna_uvw", "(", "uvw", ",", "antenna1", ",", "antenna2", ",", "chunks", ",", "nr_of_antenna", ")", "if", "check_missing", ":", "_raise_missing_antenna_errors", "(", "ant_uvw", ",", "max_err", "=", "max_err", ")", "if", "check_decomposition", ":", "_raise_decomposition_errors", "(", "uvw", ",", "antenna1", ",", "antenna2", ",", "chunks", ",", "ant_uvw", ",", "max_err", "=", "max_err", ")", "return", "ant_uvw" ]
Computes per-antenna UVW coordinates from baseline ``uvw``,
``antenna1`` and ``antenna2`` coordinates logically grouped
into baseline chunks.

The example below illustrates two baseline chunks
of size 6 and 5, respectively.

.. code-block:: python

    uvw = ...
    ant1 = np.array([0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1], dtype=np.int32)
    ant2 = np.array([1, 2, 3, 2, 3, 3, 1, 2, 3, 1, 2], dtype=np.int32)
    chunks = np.array([6, 5], dtype=np.int32)

    ant_uv = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4)

The first antenna of the first baseline of a chunk is chosen as the
origin of the antenna coordinate system, while the second antenna is
set to the negative of the baseline UVW coordinate. Subsequent antenna
UVW coordinates are iteratively derived from the first two coordinates.
Thus, the baseline indices need not be properly ordered (within the chunk).

If it is not possible to derive coordinates for an antenna,
its coordinates will be set to nan.

Parameters
----------
uvw : np.ndarray
    Baseline UVW coordinates of shape (row, 3)
antenna1 : np.ndarray
    Baseline first antenna of shape (row,)
antenna2 : np.ndarray
    Baseline second antenna of shape (row,)
chunks : np.ndarray
    Number of baselines per unique timestep with shape (chunks,)
    :code:`np.sum(chunks) == row` should hold.
nr_of_antenna : int
    Total number of antenna in the solution.
check_missing (optional) : bool
    If ``True`` raises an exception if it was not possible
    to compute UVW coordinates for all antenna
    (i.e. some were nan). Defaults to ``False``.
check_decomposition (optional) : bool
    If ``True``, checks that the antenna decomposition accurately
    reproduces the coordinates in ``uvw``, or that
    :code:`ant_uvw[c,ant1,:] - ant_uvw[c,ant2,:] == uvw[s:e,:]`
    where ``s`` and ``e`` are the start and end rows
    of chunk ``c`` respectively. Defaults to ``False``.
max_err (optional) : integer
    Maximum number of errors when checking for missing antenna
    or inaccurate decompositions. Defaults to ``100``.

Returns
-------
np.ndarray
    Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3)
[ "Computes", "per", "-", "antenna", "UVW", "coordinates", "from", "baseline", "uvw", "antenna1", "and", "antenna2", "coordinates", "logically", "grouped", "into", "baseline", "chunks", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/ant_uvw.py#L208-L279
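A minimal usage sketch for ``antenna_uvw`` (the import path below is assumed from the source URL above). It synthesises self-consistent baseline coordinates from arbitrary per-antenna positions and verifies the documented decomposition property by hand:

.. code-block:: python

    import numpy as np
    from montblanc.util.ant_uvw import antenna_uvw  # assumed import path

    ant1 = np.array([0, 0, 0, 1, 1, 2], dtype=np.int32)
    ant2 = np.array([1, 2, 3, 2, 3, 3], dtype=np.int32)
    chunks = np.array([6], dtype=np.int32)

    # Synthesise consistent baseline UVW coordinates from
    # (arbitrary) per-antenna positions
    ant_pos = np.random.random(size=(4, 3))
    uvw = ant_pos[ant1] - ant_pos[ant2]

    ant_uvw = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4,
                          check_missing=True, check_decomposition=True)

    # Per-antenna differences reproduce the baseline coordinates
    assert np.allclose(ant_uvw[0, ant1, :] - ant_uvw[0, ant2, :], uvw)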
ska-sa/montblanc
montblanc/src_types.py
default_sources
def default_sources(**kwargs): """ Returns a dictionary mapping source types to number of sources. If the number of sources for the source type is supplied in the kwargs these will be placed in the dictionary. e.g. if we have 'point', 'gaussian' and 'sersic' source types, then default_sources(point=10, gaussian=20) will return an OrderedDict {'point': 10, 'gaussian': 20, 'sersic': 0} """ S = OrderedDict() total = 0 invalid_types = [t for t in kwargs.keys() if t not in SOURCE_VAR_TYPES] for t in invalid_types: montblanc.log.warning('Source type %s is not yet ' 'implemented in montblanc. ' 'Valid source types are %s' % (t, SOURCE_VAR_TYPES.keys())) # Zero all source types for k, v in SOURCE_VAR_TYPES.iteritems(): # Try get the number of sources for this source # from the kwargs value = kwargs.get(k, 0) try: value = int(value) except ValueError: raise TypeError(('Supplied value %s ' 'for source %s cannot be ' 'converted to an integer') % \ (value, k)) total += value S[k] = value # Add a point source if no others exist if total == 0: S[POINT_TYPE] = 1 return S
python
def default_sources(**kwargs): """ Returns a dictionary mapping source types to number of sources. If the number of sources for the source type is supplied in the kwargs these will be placed in the dictionary. e.g. if we have 'point', 'gaussian' and 'sersic' source types, then default_sources(point=10, gaussian=20) will return an OrderedDict {'point': 10, 'gaussian': 20, 'sersic': 0} """ S = OrderedDict() total = 0 invalid_types = [t for t in kwargs.keys() if t not in SOURCE_VAR_TYPES] for t in invalid_types: montblanc.log.warning('Source type %s is not yet ' 'implemented in montblanc. ' 'Valid source types are %s' % (t, SOURCE_VAR_TYPES.keys())) # Zero all source types for k, v in SOURCE_VAR_TYPES.iteritems(): # Try get the number of sources for this source # from the kwargs value = kwargs.get(k, 0) try: value = int(value) except ValueError: raise TypeError(('Supplied value %s ' 'for source %s cannot be ' 'converted to an integer') % \ (value, k)) total += value S[k] = value # Add a point source if no others exist if total == 0: S[POINT_TYPE] = 1 return S
[ "def", "default_sources", "(", "*", "*", "kwargs", ")", ":", "S", "=", "OrderedDict", "(", ")", "total", "=", "0", "invalid_types", "=", "[", "t", "for", "t", "in", "kwargs", ".", "keys", "(", ")", "if", "t", "not", "in", "SOURCE_VAR_TYPES", "]", "for", "t", "in", "invalid_types", ":", "montblanc", ".", "log", ".", "warning", "(", "'Source type %s is not yet '", "'implemented in montblanc. '", "'Valid source types are %s'", "%", "(", "t", ",", "SOURCE_VAR_TYPES", ".", "keys", "(", ")", ")", ")", "# Zero all source types", "for", "k", ",", "v", "in", "SOURCE_VAR_TYPES", ".", "iteritems", "(", ")", ":", "# Try get the number of sources for this source", "# from the kwargs", "value", "=", "kwargs", ".", "get", "(", "k", ",", "0", ")", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "TypeError", "(", "(", "'Supplied value %s '", "'for source %s cannot be '", "'converted to an integer'", ")", "%", "(", "value", ",", "k", ")", ")", "total", "+=", "value", "S", "[", "k", "]", "=", "value", "# Add a point source if no others exist", "if", "total", "==", "0", ":", "S", "[", "POINT_TYPE", "]", "=", "1", "return", "S" ]
Returns a dictionary mapping source types to number of sources. If the number of sources for the source type is supplied in the kwargs these will be placed in the dictionary. e.g. if we have 'point', 'gaussian' and 'sersic' source types, then default_sources(point=10, gaussian=20) will return an OrderedDict {'point': 10, 'gaussian': 20, 'sersic': 0}
[ "Returns", "a", "dictionary", "mapping", "source", "types", "to", "number", "of", "sources", ".", "If", "the", "number", "of", "sources", "for", "the", "source", "type", "is", "supplied", "in", "the", "kwargs", "these", "will", "be", "placed", "in", "the", "dictionary", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/src_types.py#L61-L106
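A brief usage sketch (assuming the registered source types are ``point``, ``gaussian`` and ``sersic``, as the docstring example suggests):

.. code-block:: python

    from montblanc.src_types import default_sources  # assumed import path

    # Unspecified source types default to zero
    print(default_sources(point=10, gaussian=20))
    # OrderedDict([('point', 10), ('gaussian', 20), ('sersic', 0)])

    # With no arguments at all, a single point source is configured
    print(default_sources())
    # OrderedDict([('point', 1), ('gaussian', 0), ('sersic', 0)])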
ska-sa/montblanc
montblanc/src_types.py
sources_to_nr_vars
def sources_to_nr_vars(sources): """ Converts a source type to number of sources mapping into a source numbering variable to number of sources mapping. If, for example, we have 'point', 'gaussian' and 'sersic' source types, then passing the following dict as an argument sources_to_nr_vars({'point':10, 'gaussian': 20}) will return an OrderedDict {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 } """ sources = default_sources(**sources) try: return OrderedDict((SOURCE_VAR_TYPES[name], nr) for name, nr in sources.iteritems()) except KeyError as e: raise KeyError(( 'No source type ''%s'' is ' 'registered. Valid source types ' 'are %s') % (e, SOURCE_VAR_TYPES.keys()))
python
def sources_to_nr_vars(sources): """ Converts a source type to number of sources mapping into a source numbering variable to number of sources mapping. If, for example, we have 'point', 'gaussian' and 'sersic' source types, then passing the following dict as an argument sources_to_nr_vars({'point':10, 'gaussian': 20}) will return an OrderedDict {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 } """ sources = default_sources(**sources) try: return OrderedDict((SOURCE_VAR_TYPES[name], nr) for name, nr in sources.iteritems()) except KeyError as e: raise KeyError(( 'No source type ''%s'' is ' 'registered. Valid source types ' 'are %s') % (e, SOURCE_VAR_TYPES.keys()))
[ "def", "sources_to_nr_vars", "(", "sources", ")", ":", "sources", "=", "default_sources", "(", "*", "*", "sources", ")", "try", ":", "return", "OrderedDict", "(", "(", "SOURCE_VAR_TYPES", "[", "name", "]", ",", "nr", ")", "for", "name", ",", "nr", "in", "sources", ".", "iteritems", "(", ")", ")", "except", "KeyError", "as", "e", ":", "raise", "KeyError", "(", "(", "'No source type '", "'%s'", "' is '", "'registered. Valid source types '", "'are %s'", ")", "%", "(", "e", ",", "SOURCE_VAR_TYPES", ".", "keys", "(", ")", ")", ")" ]
Converts a source type to number of sources mapping into a source numbering variable to number of sources mapping. If, for example, we have 'point', 'gaussian' and 'sersic' source types, then passing the following dict as an argument sources_to_nr_vars({'point':10, 'gaussian': 20}) will return an OrderedDict {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 }
[ "Converts", "a", "source", "type", "to", "number", "of", "sources", "mapping", "into", "a", "source", "numbering", "variable", "to", "number", "of", "sources", "mapping", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/src_types.py#L108-L132
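The corresponding usage sketch (the numbering variable names ``npsrc``/``ngsrc``/``nssrc`` are taken from the docstring example):

.. code-block:: python

    from montblanc.src_types import sources_to_nr_vars  # assumed import path

    # Source type counts are rekeyed to their numbering variables,
    # with defaults applied first via default_sources()
    print(sources_to_nr_vars({'point': 10, 'gaussian': 20}))
    # OrderedDict([('npsrc', 10), ('ngsrc', 20), ('nssrc', 0)])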
ska-sa/montblanc
montblanc/src_types.py
source_range_tuple
def source_range_tuple(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type. """ starts = np.array([0 for nr_var in SOURCE_VAR_TYPES.itervalues()]) ends = np.array([nr_var_dict[nr_var] if nr_var in nr_var_dict else 0 for nr_var in SOURCE_VAR_TYPES.itervalues()]) sum_counts = np.cumsum(ends) idx = np.arange(len(starts)) # Find the intervals containing the # start and ending indices start_idx, end_idx = np.searchsorted( sum_counts, [start, end], side='right') # Handle edge cases if end >= sum_counts[-1]: end = sum_counts[-1] end_idx = len(sum_counts) - 1 # Find out which variable counts fall within the range # of the supplied indices and zero those outside this range invalid = np.logical_not(np.logical_and(start_idx <= idx, idx <= end_idx)) starts[invalid] = ends[invalid] = 0 # Modify the associated starting and ending positions starts[start_idx] = start ends[end_idx] = end if start >= sum_counts[0]: starts[start_idx] -= sum_counts[start_idx-1] if end >= sum_counts[0]: ends[end_idx] -= sum_counts[end_idx-1] return OrderedDict((n, (starts[i], ends[i])) for i, n in enumerate(SOURCE_VAR_TYPES.values()))
python
def source_range_tuple(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type. """ starts = np.array([0 for nr_var in SOURCE_VAR_TYPES.itervalues()]) ends = np.array([nr_var_dict[nr_var] if nr_var in nr_var_dict else 0 for nr_var in SOURCE_VAR_TYPES.itervalues()]) sum_counts = np.cumsum(ends) idx = np.arange(len(starts)) # Find the intervals containing the # start and ending indices start_idx, end_idx = np.searchsorted( sum_counts, [start, end], side='right') # Handle edge cases if end >= sum_counts[-1]: end = sum_counts[-1] end_idx = len(sum_counts) - 1 # Find out which variable counts fall within the range # of the supplied indices and zero those outside this range invalid = np.logical_not(np.logical_and(start_idx <= idx, idx <= end_idx)) starts[invalid] = ends[invalid] = 0 # Modify the associated starting and ending positions starts[start_idx] = start ends[end_idx] = end if start >= sum_counts[0]: starts[start_idx] -= sum_counts[start_idx-1] if end >= sum_counts[0]: ends[end_idx] -= sum_counts[end_idx-1] return OrderedDict((n, (starts[i], ends[i])) for i, n in enumerate(SOURCE_VAR_TYPES.values()))
[ "def", "source_range_tuple", "(", "start", ",", "end", ",", "nr_var_dict", ")", ":", "starts", "=", "np", ".", "array", "(", "[", "0", "for", "nr_var", "in", "SOURCE_VAR_TYPES", ".", "itervalues", "(", ")", "]", ")", "ends", "=", "np", ".", "array", "(", "[", "nr_var_dict", "[", "nr_var", "]", "if", "nr_var", "in", "nr_var_dict", "else", "0", "for", "nr_var", "in", "SOURCE_VAR_TYPES", ".", "itervalues", "(", ")", "]", ")", "sum_counts", "=", "np", ".", "cumsum", "(", "ends", ")", "idx", "=", "np", ".", "arange", "(", "len", "(", "starts", ")", ")", "# Find the intervals containing the", "# start and ending indices", "start_idx", ",", "end_idx", "=", "np", ".", "searchsorted", "(", "sum_counts", ",", "[", "start", ",", "end", "]", ",", "side", "=", "'right'", ")", "# Handle edge cases", "if", "end", ">=", "sum_counts", "[", "-", "1", "]", ":", "end", "=", "sum_counts", "[", "-", "1", "]", "end_idx", "=", "len", "(", "sum_counts", ")", "-", "1", "# Find out which variable counts fall within the range", "# of the supplied indices and zero those outside this range", "invalid", "=", "np", ".", "logical_not", "(", "np", ".", "logical_and", "(", "start_idx", "<=", "idx", ",", "idx", "<=", "end_idx", ")", ")", "starts", "[", "invalid", "]", "=", "ends", "[", "invalid", "]", "=", "0", "# Modify the associated starting and ending positions", "starts", "[", "start_idx", "]", "=", "start", "ends", "[", "end_idx", "]", "=", "end", "if", "start", ">=", "sum_counts", "[", "0", "]", ":", "starts", "[", "start_idx", "]", "-=", "sum_counts", "[", "start_idx", "-", "1", "]", "if", "end", ">=", "sum_counts", "[", "0", "]", ":", "ends", "[", "end_idx", "]", "-=", "sum_counts", "[", "end_idx", "-", "1", "]", "return", "OrderedDict", "(", "(", "n", ",", "(", "starts", "[", "i", "]", ",", "ends", "[", "i", "]", ")", ")", "for", "i", ",", "n", "in", "enumerate", "(", "SOURCE_VAR_TYPES", ".", "values", "(", ")", ")", ")" ]
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type.
[ "Given", "a", "range", "of", "source", "numbers", "as", "well", "as", "a", "dictionary", "containing", "the", "numbers", "of", "each", "source", "returns", "a", "dictionary", "containing", "tuples", "of", "the", "start", "and", "end", "index", "for", "each", "source", "variable", "type", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/src_types.py#L134-L174
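A worked example may help. With 10 point, 20 gaussian and 30 sersic sources, the cumulative counts are [10, 30, 60], so the global index range [5, 25) covers the last five point sources and the first fifteen gaussian sources (variable names assumed as above):

.. code-block:: python

    from montblanc.src_types import source_range_tuple  # assumed import path

    nr_var_dict = {'npsrc': 10, 'ngsrc': 20, 'nssrc': 30}

    print(source_range_tuple(5, 25, nr_var_dict))
    # OrderedDict([('npsrc', (5, 10)), ('ngsrc', (0, 15)), ('nssrc', (0, 0))])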
ska-sa/montblanc
montblanc/src_types.py
source_range
def source_range(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing the number of sources in the range for each source variable type. """ return OrderedDict((k, e-s) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
python
def source_range(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing the number of sources in the range for each source variable type. """ return OrderedDict((k, e-s) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
[ "def", "source_range", "(", "start", ",", "end", ",", "nr_var_dict", ")", ":", "return", "OrderedDict", "(", "(", "k", ",", "e", "-", "s", ")", "for", "k", ",", "(", "s", ",", "e", ")", "in", "source_range_tuple", "(", "start", ",", "end", ",", "nr_var_dict", ")", ".", "iteritems", "(", ")", ")" ]
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing the number of sources in the range for each source variable type.
[ "Given", "a", "range", "of", "source", "numbers", "as", "well", "as", "a", "dictionary", "containing", "the", "numbers", "of", "each", "source", "returns", "a", "dictionary", "containing", "tuples", "of", "the", "start", "and", "end", "index", "for", "each", "source", "variable", "type", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/src_types.py#L176-L186
ska-sa/montblanc
montblanc/src_types.py
source_range_slices
def source_range_slices(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type. """ return OrderedDict((k, slice(s,e,1)) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
python
def source_range_slices(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type. """ return OrderedDict((k, slice(s,e,1)) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
[ "def", "source_range_slices", "(", "start", ",", "end", ",", "nr_var_dict", ")", ":", "return", "OrderedDict", "(", "(", "k", ",", "slice", "(", "s", ",", "e", ",", "1", ")", ")", "for", "k", ",", "(", "s", ",", "e", ")", "in", "source_range_tuple", "(", "start", ",", "end", ",", "nr_var_dict", ")", ".", "iteritems", "(", ")", ")" ]
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type.
[ "Given", "a", "range", "of", "source", "numbers", "as", "well", "as", "a", "dictionary", "containing", "the", "numbers", "of", "each", "source", "returns", "a", "dictionary", "containing", "slices", "for", "each", "source", "variable", "type", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/src_types.py#L188-L197
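Both convenience wrappers derive from ``source_range_tuple``; continuing the example above:

.. code-block:: python

    from montblanc.src_types import (source_range,
                                     source_range_slices)  # assumed imports

    nr_var_dict = {'npsrc': 10, 'ngsrc': 20, 'nssrc': 30}

    # Per-type counts within global range [5, 25)
    print(source_range(5, 25, nr_var_dict))
    # OrderedDict([('npsrc', 5), ('ngsrc', 15), ('nssrc', 0)])

    # The same range expressed as slices for array indexing
    print(source_range_slices(5, 25, nr_var_dict))
    # OrderedDict([('npsrc', slice(5, 10, 1)), ('ngsrc', slice(0, 15, 1)),
    #              ('nssrc', slice(0, 0, 1))])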
ska-sa/montblanc
montblanc/examples/MS_tf_example.py
RadioSourceProvider.point_lm
def point_lm(self, context):
    """ Return a lm coordinate array to montblanc """
    lm = np.empty(context.shape, context.dtype)

    # Print the array schema
    montblanc.log.info(context.array_schema.shape)
    # Print the space of iteration
    montblanc.log.info(context.iter_args)

    (ls, us) = context.dim_extents('npsrc')

    lm[:,0] = 0.0008
    lm[:,1] = 0.0036
    # NOTE: this final assignment zeroes the whole array,
    # overriding the two values assigned above
    lm[:,:] = 0

    return lm
python
def point_lm(self, context):
    """ Return a lm coordinate array to montblanc """
    lm = np.empty(context.shape, context.dtype)

    # Print the array schema
    montblanc.log.info(context.array_schema.shape)
    # Print the space of iteration
    montblanc.log.info(context.iter_args)

    (ls, us) = context.dim_extents('npsrc')

    lm[:,0] = 0.0008
    lm[:,1] = 0.0036
    # NOTE: this final assignment zeroes the whole array,
    # overriding the two values assigned above
    lm[:,:] = 0

    return lm
[ "def", "point_lm", "(", "self", ",", "context", ")", ":", "lm", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "# Print the array schema", "montblanc", ".", "log", ".", "info", "(", "context", ".", "array_schema", ".", "shape", ")", "# Print the space of iteration", "montblanc", ".", "log", ".", "info", "(", "context", ".", "iter_args", ")", "(", "ls", ",", "us", ")", "=", "context", ".", "dim_extents", "(", "'npsrc'", ")", "lm", "[", ":", ",", "0", "]", "=", "0.0008", "lm", "[", ":", ",", "1", "]", "=", "0.0036", "lm", "[", ":", ",", ":", "]", "=", "0", "return", "lm" ]
Return a lm coordinate array to montblanc
[ "Return", "a", "lm", "coordinate", "array", "to", "montblanc" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/MS_tf_example.py#L44-L59
ska-sa/montblanc
montblanc/examples/MS_tf_example.py
RadioSourceProvider.point_stokes
def point_stokes(self, context): """ Return a stokes parameter array to montblanc """ stokes = np.empty(context.shape, context.dtype) stokes[:,:,0] = 1 stokes[:,:,1:4] = 0 return stokes
python
def point_stokes(self, context): """ Return a stokes parameter array to montblanc """ stokes = np.empty(context.shape, context.dtype) stokes[:,:,0] = 1 stokes[:,:,1:4] = 0 return stokes
[ "def", "point_stokes", "(", "self", ",", "context", ")", ":", "stokes", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "stokes", "[", ":", ",", ":", ",", "0", "]", "=", "1", "stokes", "[", ":", ",", ":", ",", "1", ":", "4", "]", "=", "0", "return", "stokes" ]
Return a stokes parameter array to montblanc
[ "Return", "a", "stokes", "parameter", "array", "to", "montblanc" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/MS_tf_example.py#L61-L66
ska-sa/montblanc
montblanc/examples/MS_tf_example.py
RadioSourceProvider.ref_frequency
def ref_frequency(self, context): """ Return a reference frequency array to montblanc """ ref_freq = np.empty(context.shape, context.dtype) ref_freq[:] = 1.415e9 return ref_freq
python
def ref_frequency(self, context): """ Return a reference frequency array to montblanc """ ref_freq = np.empty(context.shape, context.dtype) ref_freq[:] = 1.415e9 return ref_freq
[ "def", "ref_frequency", "(", "self", ",", "context", ")", ":", "ref_freq", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "ref_freq", "[", ":", "]", "=", "1.415e9", "return", "ref_freq" ]
Return a reference frequency array to montblanc
[ "Return", "a", "reference", "frequency", "array", "to", "montblanc" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/MS_tf_example.py#L76-L81
kislyuk/aegea
aegea/packages/github3/auths.py
Authorization.update
def update(self, scopes=[], add_scopes=[], rm_scopes=[], note='', note_url=''): """Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool """ success = False json = None if scopes: d = {'scopes': scopes} json = self._json(self._post(self._api, data=d), 200) if add_scopes: d = {'add_scopes': add_scopes} json = self._json(self._post(self._api, data=d), 200) if rm_scopes: d = {'remove_scopes': rm_scopes} json = self._json(self._post(self._api, data=d), 200) if note or note_url: d = {'note': note, 'note_url': note_url} json = self._json(self._post(self._api, data=d), 200) if json: self._update_(json) success = True return success
python
def update(self, scopes=[], add_scopes=[], rm_scopes=[], note='', note_url=''): """Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool """ success = False json = None if scopes: d = {'scopes': scopes} json = self._json(self._post(self._api, data=d), 200) if add_scopes: d = {'add_scopes': add_scopes} json = self._json(self._post(self._api, data=d), 200) if rm_scopes: d = {'remove_scopes': rm_scopes} json = self._json(self._post(self._api, data=d), 200) if note or note_url: d = {'note': note, 'note_url': note_url} json = self._json(self._post(self._api, data=d), 200) if json: self._update_(json) success = True return success
[ "def", "update", "(", "self", ",", "scopes", "=", "[", "]", ",", "add_scopes", "=", "[", "]", ",", "rm_scopes", "=", "[", "]", ",", "note", "=", "''", ",", "note_url", "=", "''", ")", ":", "success", "=", "False", "json", "=", "None", "if", "scopes", ":", "d", "=", "{", "'scopes'", ":", "scopes", "}", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "self", ".", "_api", ",", "data", "=", "d", ")", ",", "200", ")", "if", "add_scopes", ":", "d", "=", "{", "'add_scopes'", ":", "add_scopes", "}", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "self", ".", "_api", ",", "data", "=", "d", ")", ",", "200", ")", "if", "rm_scopes", ":", "d", "=", "{", "'remove_scopes'", ":", "rm_scopes", "}", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "self", ".", "_api", ",", "data", "=", "d", ")", ",", "200", ")", "if", "note", "or", "note_url", ":", "d", "=", "{", "'note'", ":", "note", ",", "'note_url'", ":", "note_url", "}", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "self", ".", "_api", ",", "data", "=", "d", ")", ",", "200", ")", "if", "json", ":", "self", ".", "_update_", "(", "json", ")", "success", "=", "True", "return", "success" ]
Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool
[ "Update", "this", "authorization", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/auths.py#L67-L99
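A hedged usage sketch (``auth`` is assumed to be an ``Authorization`` obtained elsewhere, e.g. via a GitHub session's authorization lookup). Note that, as the body above shows, each supplied keyword group triggers a separate POST:

.. code-block:: python

    # auth is an existing Authorization instance (hypothetical)
    if auth.update(add_scopes=['repo', 'gist'], rm_scopes=['user'],
                   note='CI token'):
        print('authorization updated')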
kislyuk/aegea
aegea/packages/github3/issues/milestone.py
Milestone.iter_labels
def iter_labels(self, number=-1, etag=None): """Iterate over the labels for every issue associated with this milestone. .. versionchanged:: 0.9 Add etag parameter. :param int number: (optional), number of labels to return. Default: -1 returns all available labels. :param str etag: (optional), ETag header from a previous response :returns: generator of :class:`Label <github3.issues.label.Label>`\ s """ url = self._build_url('labels', base_url=self._api) return self._iter(int(number), url, Label, etag=etag)
python
def iter_labels(self, number=-1, etag=None): """Iterate over the labels for every issue associated with this milestone. .. versionchanged:: 0.9 Add etag parameter. :param int number: (optional), number of labels to return. Default: -1 returns all available labels. :param str etag: (optional), ETag header from a previous response :returns: generator of :class:`Label <github3.issues.label.Label>`\ s """ url = self._build_url('labels', base_url=self._api) return self._iter(int(number), url, Label, etag=etag)
[ "def", "iter_labels", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'labels'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "Label", ",", "etag", "=", "etag", ")" ]
Iterate over the labels for every issue associated with this milestone. .. versionchanged:: 0.9 Add etag parameter. :param int number: (optional), number of labels to return. Default: -1 returns all available labels. :param str etag: (optional), ETag header from a previous response :returns: generator of :class:`Label <github3.issues.label.Label>`\ s
[ "Iterate", "over", "the", "labels", "for", "every", "issue", "associated", "with", "this", "milestone", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/issues/milestone.py#L60-L74
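A short sketch (``milestone`` assumed obtained elsewhere, e.g. ``repo.milestone(number)``):

.. code-block:: python

    # Collect the names of at most ten labels across the milestone's issues
    names = [label.name for label in milestone.iter_labels(10)]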
bwohlberg/jonga
jonga.py
current_function
def current_function(frame): """ Get reference to currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- fnc : function reference Currently running function """ if frame is None: return None code = frame.f_code # Attempting to extract the function reference for these calls appears # to be problematic if code.co_name == '__del__' or code.co_name == '_remove' or \ code.co_name == '_removeHandlerRef': return None try: # Solution follows suggestion at http://stackoverflow.com/a/37099372 lst = [referer for referer in gc.get_referrers(code) if getattr(referer, "__code__", None) is code and inspect.getclosurevars(referer).nonlocals.items() <= frame.f_locals.items()] if lst: return lst[0] else: return None except ValueError: # inspect.getclosurevars can fail with ValueError: Cell is empty return None
python
def current_function(frame): """ Get reference to currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- fnc : function reference Currently running function """ if frame is None: return None code = frame.f_code # Attempting to extract the function reference for these calls appears # to be problematic if code.co_name == '__del__' or code.co_name == '_remove' or \ code.co_name == '_removeHandlerRef': return None try: # Solution follows suggestion at http://stackoverflow.com/a/37099372 lst = [referer for referer in gc.get_referrers(code) if getattr(referer, "__code__", None) is code and inspect.getclosurevars(referer).nonlocals.items() <= frame.f_locals.items()] if lst: return lst[0] else: return None except ValueError: # inspect.getclosurevars can fail with ValueError: Cell is empty return None
[ "def", "current_function", "(", "frame", ")", ":", "if", "frame", "is", "None", ":", "return", "None", "code", "=", "frame", ".", "f_code", "# Attempting to extract the function reference for these calls appears", "# to be problematic", "if", "code", ".", "co_name", "==", "'__del__'", "or", "code", ".", "co_name", "==", "'_remove'", "or", "code", ".", "co_name", "==", "'_removeHandlerRef'", ":", "return", "None", "try", ":", "# Solution follows suggestion at http://stackoverflow.com/a/37099372", "lst", "=", "[", "referer", "for", "referer", "in", "gc", ".", "get_referrers", "(", "code", ")", "if", "getattr", "(", "referer", ",", "\"__code__\"", ",", "None", ")", "is", "code", "and", "inspect", ".", "getclosurevars", "(", "referer", ")", ".", "nonlocals", ".", "items", "(", ")", "<=", "frame", ".", "f_locals", ".", "items", "(", ")", "]", "if", "lst", ":", "return", "lst", "[", "0", "]", "else", ":", "return", "None", "except", "ValueError", ":", "# inspect.getclosurevars can fail with ValueError: Cell is empty", "return", "None" ]
Get reference to currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- fnc : function reference Currently running function
[ "Get", "reference", "to", "currently", "running", "function", "from", "inspect", "/", "trace", "stack", "frame", "." ]
train
https://github.com/bwohlberg/jonga/blob/1947d40d78d814fbdc3a973ceffec4b69b4623f2/jonga.py#L25-L62
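The core trick is mapping a code object back to its function object via the garbage collector. A simplified, self-contained sketch (omitting the closure-variable disambiguation used above):

.. code-block:: python

    import gc
    import sys

    def find_function(frame):
        # A function object refers to its code object through __code__,
        # so the gc referrer list recovers it from the frame's code
        code = frame.f_code
        funcs = [ref for ref in gc.get_referrers(code)
                 if getattr(ref, '__code__', None) is code]
        return funcs[0] if funcs else None

    def example():
        return find_function(sys._getframe())

    assert example() is example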
bwohlberg/jonga
jonga.py
current_module_name
def current_module_name(frame):
    """
    Get name of module of currently running function from
    inspect/trace stack frame.

    Parameters
    ----------
    frame : stack frame
      Stack frame obtained via trace or inspect

    Returns
    -------
    modname : string
      Currently running function module name
    """

    if frame is None:
        return None

    # Test for the key directly: a plain dict never has a '__name__'
    # attribute, so a hasattr check would always be False here
    if '__name__' in frame.f_globals:
        return frame.f_globals['__name__']
    else:
        mod = inspect.getmodule(frame)
        if mod is None:
            return ''
        else:
            return mod.__name__
python
def current_module_name(frame):
    """
    Get name of module of currently running function from
    inspect/trace stack frame.

    Parameters
    ----------
    frame : stack frame
      Stack frame obtained via trace or inspect

    Returns
    -------
    modname : string
      Currently running function module name
    """

    if frame is None:
        return None

    # Test for the key directly: a plain dict never has a '__name__'
    # attribute, so a hasattr check would always be False here
    if '__name__' in frame.f_globals:
        return frame.f_globals['__name__']
    else:
        mod = inspect.getmodule(frame)
        if mod is None:
            return ''
        else:
            return mod.__name__
[ "def", "current_module_name", "(", "frame", ")", ":", "if", "frame", "is", "None", ":", "return", "None", "if", "hasattr", "(", "frame", ".", "f_globals", ",", "'__name__'", ")", ":", "return", "frame", ".", "f_globals", "[", "'__name__'", "]", "else", ":", "mod", "=", "inspect", ".", "getmodule", "(", "frame", ")", "if", "mod", "is", "None", ":", "return", "''", "else", ":", "return", "mod", ".", "__name__" ]
Get name of module of currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- modname : string Currently running function module name
[ "Get", "name", "of", "module", "of", "currently", "running", "function", "from", "inspect", "/", "trace", "stack", "frame", "." ]
train
https://github.com/bwohlberg/jonga/blob/1947d40d78d814fbdc3a973ceffec4b69b4623f2/jonga.py#L111-L137
bwohlberg/jonga
jonga.py
CallTracer._trace
def _trace(self, frame, event, arg): """ Build a record of called functions using the trace mechanism """ # Return if this is not a function call if event != 'call': return # Filter calling and called functions by module names src_mod = current_module_name(frame.f_back) dst_mod = current_module_name(frame) # Avoid tracing the tracer (specifically, call from # ContextCallTracer.__exit__ to CallTracer.stop) if src_mod == __modulename__ or dst_mod == __modulename__: return # Apply source and destination module filters if not self.srcmodflt.match(src_mod): return if not self.dstmodflt.match(dst_mod): return # Get calling and called functions src_func = current_function(frame.f_back) dst_func = current_function(frame) # Filter calling and called functions by qnames if not self.srcqnmflt.match(function_qname(src_func)): return if not self.dstqnmflt.match(function_qname(dst_func)): return # Get calling and called function full names src_name = function_fqname(src_func) dst_name = function_fqname(dst_func) # Modify full function names if necessary if self.fnmsub is not None: src_name = re.sub(self.fnmsub[0], self.fnmsub[1], src_name) dst_name = re.sub(self.fnmsub[0], self.fnmsub[1], dst_name) # Update calling function count if src_func is not None: if src_name in self.fncts: self.fncts[src_name][0] += 1 else: self.fncts[src_name] = [1, 0] # Update called function count if dst_func is not None and src_func is not None: if dst_name in self.fncts: self.fncts[dst_name][1] += 1 else: self.fncts[dst_name] = [0, 1] # Update caller/calling pair count if dst_func is not None and src_func is not None: key = (src_name, dst_name) if key in self.calls: self.calls[key] += 1 else: self.calls[key] = 1
python
def _trace(self, frame, event, arg): """ Build a record of called functions using the trace mechanism """ # Return if this is not a function call if event != 'call': return # Filter calling and called functions by module names src_mod = current_module_name(frame.f_back) dst_mod = current_module_name(frame) # Avoid tracing the tracer (specifically, call from # ContextCallTracer.__exit__ to CallTracer.stop) if src_mod == __modulename__ or dst_mod == __modulename__: return # Apply source and destination module filters if not self.srcmodflt.match(src_mod): return if not self.dstmodflt.match(dst_mod): return # Get calling and called functions src_func = current_function(frame.f_back) dst_func = current_function(frame) # Filter calling and called functions by qnames if not self.srcqnmflt.match(function_qname(src_func)): return if not self.dstqnmflt.match(function_qname(dst_func)): return # Get calling and called function full names src_name = function_fqname(src_func) dst_name = function_fqname(dst_func) # Modify full function names if necessary if self.fnmsub is not None: src_name = re.sub(self.fnmsub[0], self.fnmsub[1], src_name) dst_name = re.sub(self.fnmsub[0], self.fnmsub[1], dst_name) # Update calling function count if src_func is not None: if src_name in self.fncts: self.fncts[src_name][0] += 1 else: self.fncts[src_name] = [1, 0] # Update called function count if dst_func is not None and src_func is not None: if dst_name in self.fncts: self.fncts[dst_name][1] += 1 else: self.fncts[dst_name] = [0, 1] # Update caller/calling pair count if dst_func is not None and src_func is not None: key = (src_name, dst_name) if key in self.calls: self.calls[key] += 1 else: self.calls[key] = 1
[ "def", "_trace", "(", "self", ",", "frame", ",", "event", ",", "arg", ")", ":", "# Return if this is not a function call", "if", "event", "!=", "'call'", ":", "return", "# Filter calling and called functions by module names", "src_mod", "=", "current_module_name", "(", "frame", ".", "f_back", ")", "dst_mod", "=", "current_module_name", "(", "frame", ")", "# Avoid tracing the tracer (specifically, call from", "# ContextCallTracer.__exit__ to CallTracer.stop)", "if", "src_mod", "==", "__modulename__", "or", "dst_mod", "==", "__modulename__", ":", "return", "# Apply source and destination module filters", "if", "not", "self", ".", "srcmodflt", ".", "match", "(", "src_mod", ")", ":", "return", "if", "not", "self", ".", "dstmodflt", ".", "match", "(", "dst_mod", ")", ":", "return", "# Get calling and called functions", "src_func", "=", "current_function", "(", "frame", ".", "f_back", ")", "dst_func", "=", "current_function", "(", "frame", ")", "# Filter calling and called functions by qnames", "if", "not", "self", ".", "srcqnmflt", ".", "match", "(", "function_qname", "(", "src_func", ")", ")", ":", "return", "if", "not", "self", ".", "dstqnmflt", ".", "match", "(", "function_qname", "(", "dst_func", ")", ")", ":", "return", "# Get calling and called function full names", "src_name", "=", "function_fqname", "(", "src_func", ")", "dst_name", "=", "function_fqname", "(", "dst_func", ")", "# Modify full function names if necessary", "if", "self", ".", "fnmsub", "is", "not", "None", ":", "src_name", "=", "re", ".", "sub", "(", "self", ".", "fnmsub", "[", "0", "]", ",", "self", ".", "fnmsub", "[", "1", "]", ",", "src_name", ")", "dst_name", "=", "re", ".", "sub", "(", "self", ".", "fnmsub", "[", "0", "]", ",", "self", ".", "fnmsub", "[", "1", "]", ",", "dst_name", ")", "# Update calling function count", "if", "src_func", "is", "not", "None", ":", "if", "src_name", "in", "self", ".", "fncts", ":", "self", ".", "fncts", "[", "src_name", "]", "[", "0", "]", "+=", "1", "else", ":", "self", ".", "fncts", "[", "src_name", "]", "=", "[", "1", ",", "0", "]", "# Update called function count", "if", "dst_func", "is", "not", "None", "and", "src_func", "is", "not", "None", ":", "if", "dst_name", "in", "self", ".", "fncts", ":", "self", ".", "fncts", "[", "dst_name", "]", "[", "1", "]", "+=", "1", "else", ":", "self", ".", "fncts", "[", "dst_name", "]", "=", "[", "0", ",", "1", "]", "# Update caller/calling pair count", "if", "dst_func", "is", "not", "None", "and", "src_func", "is", "not", "None", ":", "key", "=", "(", "src_name", ",", "dst_name", ")", "if", "key", "in", "self", ".", "calls", ":", "self", ".", "calls", "[", "key", "]", "+=", "1", "else", ":", "self", ".", "calls", "[", "key", "]", "=", "1" ]
Build a record of called functions using the trace mechanism
[ "Build", "a", "record", "of", "called", "functions", "using", "the", "trace", "mechanism" ]
train
https://github.com/bwohlberg/jonga/blob/1947d40d78d814fbdc3a973ceffec4b69b4623f2/jonga.py#L232-L295
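``_trace`` is installed as a global trace function (via ``sys.settrace``, as ``stop`` below suggests) and only reacts to ``'call'`` events. A minimal standalone sketch of that mechanism:

.. code-block:: python

    import sys

    def tracer(frame, event, arg):
        if event == 'call':
            print('call -> ' + frame.f_code.co_name)
        return None  # no per-line tracing required

    def inner():
        pass

    def outer():
        inner()

    sys.settrace(tracer)
    outer()
    sys.settrace(None)
    # call -> outer
    # call -> inner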
bwohlberg/jonga
jonga.py
CallTracer.stop
def stop(self): """Stop tracing""" # Stop tracing sys.settrace(None) # Build group structure if group filter is defined if self.grpflt is not None: # Iterate over graph nodes (functions) for k in self.fncts: # Construct group identity string m = self.grpflt.search(k) # If group identity string found, append current node # to that group if m is not None: ms = m.group(0) if ms in self.group: self.group[ms].append(k) else: self.group[ms] = [k, ]
python
def stop(self): """Stop tracing""" # Stop tracing sys.settrace(None) # Build group structure if group filter is defined if self.grpflt is not None: # Iterate over graph nodes (functions) for k in self.fncts: # Construct group identity string m = self.grpflt.search(k) # If group identity string found, append current node # to that group if m is not None: ms = m.group(0) if ms in self.group: self.group[ms].append(k) else: self.group[ms] = [k, ]
[ "def", "stop", "(", "self", ")", ":", "# Stop tracing", "sys", ".", "settrace", "(", "None", ")", "# Build group structure if group filter is defined", "if", "self", ".", "grpflt", "is", "not", "None", ":", "# Iterate over graph nodes (functions)", "for", "k", "in", "self", ".", "fncts", ":", "# Construct group identity string", "m", "=", "self", ".", "grpflt", ".", "search", "(", "k", ")", "# If group identity string found, append current node", "# to that group", "if", "m", "is", "not", "None", ":", "ms", "=", "m", ".", "group", "(", "0", ")", "if", "ms", "in", "self", ".", "group", ":", "self", ".", "group", "[", "ms", "]", ".", "append", "(", "k", ")", "else", ":", "self", ".", "group", "[", "ms", "]", "=", "[", "k", ",", "]" ]
Stop tracing
[ "Stop", "tracing" ]
train
https://github.com/bwohlberg/jonga/blob/1947d40d78d814fbdc3a973ceffec4b69b4623f2/jonga.py#L306-L325
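The group construction in ``stop`` keys each function's fully-qualified name by the leading match of the group filter regex. A standalone sketch with a hypothetical filter that groups by ``package.module``:

.. code-block:: python

    import re

    grpflt = re.compile(r'^[^\.]*\.[^\.]*')  # hypothetical group filter
    fncts = ['pkg.modA.f', 'pkg.modA.g', 'pkg.modB.h']

    group = {}
    for k in fncts:
        m = grpflt.search(k)
        if m is not None:
            group.setdefault(m.group(0), []).append(k)

    print(group)
    # {'pkg.modA': ['pkg.modA.f', 'pkg.modA.g'], 'pkg.modB': ['pkg.modB.h']}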
bwohlberg/jonga
jonga.py
CallTracer._clrgen
def _clrgen(n, h0, hr): """Default colour generating function Parameters ---------- n : int Number of colours to generate h0 : float Initial H value in HSV colour specification hr : float Size of H value range to use for colour generation (final H value is h0 + hr) Returns ------- clst : list of strings List of HSV format colour specification strings """ n0 = n if n == 1 else n-1 clst = ['%f,%f,%f' % (h0 + hr*hi/n0, 0.35, 0.85) for hi in range(n)] return clst
python
def _clrgen(n, h0, hr): """Default colour generating function Parameters ---------- n : int Number of colours to generate h0 : float Initial H value in HSV colour specification hr : float Size of H value range to use for colour generation (final H value is h0 + hr) Returns ------- clst : list of strings List of HSV format colour specification strings """ n0 = n if n == 1 else n-1 clst = ['%f,%f,%f' % (h0 + hr*hi/n0, 0.35, 0.85) for hi in range(n)] return clst
[ "def", "_clrgen", "(", "n", ",", "h0", ",", "hr", ")", ":", "n0", "=", "n", "if", "n", "==", "1", "else", "n", "-", "1", "clst", "=", "[", "'%f,%f,%f'", "%", "(", "h0", "+", "hr", "*", "hi", "/", "n0", ",", "0.35", ",", "0.85", ")", "for", "hi", "in", "range", "(", "n", ")", "]", "return", "clst" ]
Default colour generating function Parameters ---------- n : int Number of colours to generate h0 : float Initial H value in HSV colour specification hr : float Size of H value range to use for colour generation (final H value is h0 + hr) Returns ------- clst : list of strings List of HSV format colour specification strings
[ "Default", "colour", "generating", "function" ]
train
https://github.com/bwohlberg/jonga/blob/1947d40d78d814fbdc3a973ceffec4b69b4623f2/jonga.py#L331-L354
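For example, three groups yield H values evenly spaced across [h0, h0 + hr] with fixed saturation and value (a standalone copy of the logic, for illustration only):

.. code-block:: python

    def clrgen(n, h0=0.330, hr=0.825):
        n0 = n if n == 1 else n - 1
        return ['%f,%f,%f' % (h0 + hr * hi / n0, 0.35, 0.85)
                for hi in range(n)]

    print(clrgen(3))
    # ['0.330000,0.350000,0.850000',
    #  '0.742500,0.350000,0.850000',
    #  '1.155000,0.350000,0.850000']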
bwohlberg/jonga
jonga.py
CallTracer.graph
def graph(self, fnm=None, size=None, fntsz=None, fntfm=None, clrgen=None,
          rmsz=False, prog='dot'):
    """
    Construct call graph

    Parameters
    ----------
    fnm : None or string, optional (default None)
        Filename of graph file to be written. File type is determined
        by the file extensions (e.g. dot for 'graph.dot' and SVG for
        'graph.svg'). If None, a file is not written.
    size : string or None, optional (default None)
        Graph image size specification string.
    fntsz : int or None, optional (default None)
        Font size for text.
    fntfm : string or None, optional (default None)
        Font family specification string.
    clrgen : function or None, optional (default None)
        Function to call to generate the group colours. This function
        should take an integer specifying the number of groups as an
        argument and return a list of graphviz-compatible colour
        specification strings.
    rmsz : bool, optional (default False)
        If True, remove the width and height specifications from an
        SVG format output file so that the size scales properly when
        viewed in a web browser
    prog : string, optional (default 'dot')
        Name of graphviz layout program to use.

    Returns
    -------
    pgr : pygraphviz.AGraph
        Call graph of traced function calls
    """

    # Default colour generation function
    if clrgen is None:
        clrgen = lambda n: self._clrgen(n, 0.330, 0.825)

    # Generate color list
    clrlst = clrgen(len(self.group))

    # Initialise a pygraphviz graph
    g = pgv.AGraph(strict=False, directed=True, landscape=False,
                   rankdir='LR', newrank=True, fontsize=fntsz,
                   fontname=fntfm, size=size, ratio='compress',
                   color='black', bgcolor='#ffffff00')

    # Set graph attributes
    g.node_attr.update(penwidth=0.25, shape='box', style='rounded,filled')

    # Iterate over functions adding them as graph nodes
    for k in self.fncts:
        g.add_node(k, fontsize=fntsz, fontname=fntfm)

        # If lnksub regex pair is provided, compute an href link
        # target from the node name and add it as an attribute to
        # the node
        if self.lnksub is not None:
            lnktgt = re.sub(self.lnksub[0], self.lnksub[1], k)
            g.get_node(k).attr.update(href=lnktgt, target="_top")

        # If function has no calls to it, set its rank to "source"
        if self.fncts[k][1] == 0:
            g.get_node(k).attr.update(rank='source')

    # If groups defined, construct a subgraph for each and add the
    # nodes in each group to the corresponding subgraph
    if self.group:
        fngrpnm = {}
        # Iterate over group number/group name pairs
        for k in zip(range(len(self.group)), sorted(self.group)):
            g.add_subgraph(self.group[k[1]], name='cluster_' + k[1],
                           label=k[1], penwidth=2, style='dotted',
                           pencolor=clrlst[k[0]])
            # Iterate over nodes in current group
            for l in self.group[k[1]]:
                # Create record of function group number
                fngrpnm[l] = k[0]
                # Set common group colour for current node
                g.get_node(l).attr.update(fillcolor=clrlst[k[0]])

    # Iterate over function calls, adding each as an edge
    for k in self.calls:
        # If groups defined, set edge colour according to group of
        # calling function, otherwise set a standard colour
        if self.group:
            g.add_edge(k[0], k[1], penwidth=2, color=clrlst[fngrpnm[k[0]]])
        else:
            g.add_edge(k[0], k[1], color='grey')

    # Call layout program
    g.layout(prog=prog)

    # Write graph file if filename provided
    if fnm is not None:
        ext = os.path.splitext(fnm)[1]
        if ext == '.dot':
            g.write(fnm)
        else:
            if ext == '.svg' and rmsz:
                img = g.draw(format='svg').decode('utf-8')
                cp = re.compile(r'\n<svg width=\"[^\"]*\" '
                                'height=\"[^\"]*\"')
                img = cp.sub(r'\n<svg', img, count=1)
                with open(fnm, 'w') as fd:
                    fd.write(img)
            else:
                g.draw(fnm)

    # Return graph object
    return g
python
def graph(self, fnm=None, size=None, fntsz=None, fntfm=None, clrgen=None,
          rmsz=False, prog='dot'):
    """
    Construct call graph

    Parameters
    ----------
    fnm : None or string, optional (default None)
        Filename of graph file to be written. File type is determined
        by the file extensions (e.g. dot for 'graph.dot' and SVG for
        'graph.svg'). If None, a file is not written.
    size : string or None, optional (default None)
        Graph image size specification string.
    fntsz : int or None, optional (default None)
        Font size for text.
    fntfm : string or None, optional (default None)
        Font family specification string.
    clrgen : function or None, optional (default None)
        Function to call to generate the group colours. This function
        should take an integer specifying the number of groups as an
        argument and return a list of graphviz-compatible colour
        specification strings.
    rmsz : bool, optional (default False)
        If True, remove the width and height specifications from an
        SVG format output file so that the size scales properly when
        viewed in a web browser
    prog : string, optional (default 'dot')
        Name of graphviz layout program to use.

    Returns
    -------
    pgr : pygraphviz.AGraph
        Call graph of traced function calls
    """

    # Default colour generation function
    if clrgen is None:
        clrgen = lambda n: self._clrgen(n, 0.330, 0.825)

    # Generate color list
    clrlst = clrgen(len(self.group))

    # Initialise a pygraphviz graph
    g = pgv.AGraph(strict=False, directed=True, landscape=False,
                   rankdir='LR', newrank=True, fontsize=fntsz,
                   fontname=fntfm, size=size, ratio='compress',
                   color='black', bgcolor='#ffffff00')

    # Set graph attributes
    g.node_attr.update(penwidth=0.25, shape='box', style='rounded,filled')

    # Iterate over functions adding them as graph nodes
    for k in self.fncts:
        g.add_node(k, fontsize=fntsz, fontname=fntfm)

        # If lnksub regex pair is provided, compute an href link
        # target from the node name and add it as an attribute to
        # the node
        if self.lnksub is not None:
            lnktgt = re.sub(self.lnksub[0], self.lnksub[1], k)
            g.get_node(k).attr.update(href=lnktgt, target="_top")

        # If function has no calls to it, set its rank to "source"
        if self.fncts[k][1] == 0:
            g.get_node(k).attr.update(rank='source')

    # If groups defined, construct a subgraph for each and add the
    # nodes in each group to the corresponding subgraph
    if self.group:
        fngrpnm = {}
        # Iterate over group number/group name pairs
        for k in zip(range(len(self.group)), sorted(self.group)):
            g.add_subgraph(self.group[k[1]], name='cluster_' + k[1],
                           label=k[1], penwidth=2, style='dotted',
                           pencolor=clrlst[k[0]])
            # Iterate over nodes in current group
            for l in self.group[k[1]]:
                # Create record of function group number
                fngrpnm[l] = k[0]
                # Set common group colour for current node
                g.get_node(l).attr.update(fillcolor=clrlst[k[0]])

    # Iterate over function calls, adding each as an edge
    for k in self.calls:
        # If groups defined, set edge colour according to group of
        # calling function, otherwise set a standard colour
        if self.group:
            g.add_edge(k[0], k[1], penwidth=2, color=clrlst[fngrpnm[k[0]]])
        else:
            g.add_edge(k[0], k[1], color='grey')

    # Call layout program
    g.layout(prog=prog)

    # Write graph file if filename provided
    if fnm is not None:
        ext = os.path.splitext(fnm)[1]
        if ext == '.dot':
            g.write(fnm)
        else:
            if ext == '.svg' and rmsz:
                img = g.draw(format='svg').decode('utf-8')
                cp = re.compile(r'\n<svg width=\"[^\"]*\" '
                                'height=\"[^\"]*\"')
                img = cp.sub(r'\n<svg', img, count=1)
                with open(fnm, 'w') as fd:
                    fd.write(img)
            else:
                g.draw(fnm)

    # Return graph object
    return g
[ "def", "graph", "(", "self", ",", "fnm", "=", "None", ",", "size", "=", "None", ",", "fntsz", "=", "None", ",", "fntfm", "=", "None", ",", "clrgen", "=", "None", ",", "rmsz", "=", "False", ",", "prog", "=", "'dot'", ")", ":", "# Default colour generation function", "if", "clrgen", "is", "None", ":", "clrgen", "=", "lambda", "n", ":", "self", ".", "_clrgen", "(", "n", ",", "0.330", ",", "0.825", ")", "# Generate color list", "clrlst", "=", "clrgen", "(", "len", "(", "self", ".", "group", ")", ")", "# Initialise a pygraphviz graph", "g", "=", "pgv", ".", "AGraph", "(", "strict", "=", "False", ",", "directed", "=", "True", ",", "landscape", "=", "False", ",", "rankdir", "=", "'LR'", ",", "newrank", "=", "True", ",", "fontsize", "=", "fntsz", ",", "fontname", "=", "fntfm", ",", "size", "=", "size", ",", "ratio", "=", "'compress'", ",", "color", "=", "'black'", ",", "bgcolor", "=", "'#ffffff00'", ")", "# Set graph attributes", "g", ".", "node_attr", ".", "update", "(", "penwidth", "=", "0.25", ",", "shape", "=", "'box'", ",", "style", "=", "'rounded,filled'", ")", "# Iterate over functions adding them as graph nodes", "for", "k", "in", "self", ".", "fncts", ":", "g", ".", "add_node", "(", "k", ",", "fontsize", "=", "fntsz", ",", "fontname", "=", "fntfm", ")", "# If lnksub regex pair is provided, compute an href link", "# target from the node name and add it as an attribute to", "# the node", "if", "self", ".", "lnksub", "is", "not", "None", ":", "lnktgt", "=", "re", ".", "sub", "(", "self", ".", "lnksub", "[", "0", "]", ",", "self", ".", "lnksub", "[", "1", "]", ",", "k", ")", "g", ".", "get_node", "(", "k", ")", ".", "attr", ".", "update", "(", "href", "=", "lnktgt", ",", "target", "=", "\"_top\"", ")", "# If function has no calls to it, set its rank to \"source\"", "if", "self", ".", "fncts", "[", "k", "]", "[", "1", "]", "==", "0", ":", "g", ".", "get_node", "(", "k", ")", ".", "attr", ".", "update", "(", "rank", "=", "'source'", ")", "# If groups defined, construct a subgraph for each and add the", "# nodes in each group to the corresponding subgraph", "if", "self", ".", "group", ":", "fngrpnm", "=", "{", "}", "# Iterate over group number/group name pairs", "for", "k", "in", "zip", "(", "range", "(", "len", "(", "self", ".", "group", ")", ")", ",", "sorted", "(", "self", ".", "group", ")", ")", ":", "g", ".", "add_subgraph", "(", "self", ".", "group", "[", "k", "[", "1", "]", "]", ",", "name", "=", "'cluster_'", "+", "k", "[", "1", "]", ",", "label", "=", "k", "[", "1", "]", ",", "penwidth", "=", "2", ",", "style", "=", "'dotted'", ",", "pencolor", "=", "clrlst", "[", "k", "[", "0", "]", "]", ")", "# Iterate over nodes in current group", "for", "l", "in", "self", ".", "group", "[", "k", "[", "1", "]", "]", ":", "# Create record of function group number", "fngrpnm", "[", "l", "]", "=", "k", "[", "0", "]", "# Set common group colour for current node", "g", ".", "get_node", "(", "l", ")", ".", "attr", ".", "update", "(", "fillcolor", "=", "clrlst", "[", "k", "[", "0", "]", "]", ")", "# Iterate over function calls, adding each as an edge", "for", "k", "in", "self", ".", "calls", ":", "# If groups defined, set edge colour according to group of", "# calling function, otherwise set a standard colour", "if", "self", ".", "group", ":", "g", ".", "add_edge", "(", "k", "[", "0", "]", ",", "k", "[", "1", "]", ",", "penwidth", "=", "2", ",", "color", "=", "clrlst", "[", "fngrpnm", "[", "k", "[", "0", "]", "]", "]", ")", "else", ":", "g", ".", "add_edge", "(", "k", "[", "0", "]", ",", "k", "[", "1", 
"]", ",", "color", "=", "'grey'", ")", "# Call layout program", "g", ".", "layout", "(", "prog", "=", "prog", ")", "# Write graph file if filename provided", "if", "fnm", "is", "not", "None", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fnm", ")", "[", "1", "]", "if", "ext", "==", "'.dot'", ":", "g", ".", "write", "(", "fnm", ")", "else", ":", "if", "ext", "==", "'.svg'", "and", "rmsz", ":", "img", "=", "g", ".", "draw", "(", "format", "=", "'svg'", ")", ".", "decode", "(", "'utf-8'", ")", "cp", "=", "re", ".", "compile", "(", "r'\\n<svg width=\\\"[^\\\"]*\\\" '", "'height=\\\"[^\\\"]*\\\"'", ")", "img", "=", "cp", ".", "sub", "(", "r'\\n<svg'", ",", "img", ",", "count", "=", "1", ")", "with", "open", "(", "fnm", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "img", ")", "else", ":", "g", ".", "draw", "(", "fnm", ")", "# Return graph object", "return", "g" ]
Construct call graph

Parameters
----------
fnm : None or string, optional (default None)
    Filename of graph file to be written. File type is determined by
    the file extensions (e.g. dot for 'graph.dot' and SVG for
    'graph.svg'). If None, a file is not written.
size : string or None, optional (default None)
    Graph image size specification string.
fntsz : int or None, optional (default None)
    Font size for text.
fntfm : string or None, optional (default None)
    Font family specification string.
clrgen : function or None, optional (default None)
    Function to call to generate the group colours. This function
    should take an integer specifying the number of groups as an
    argument and return a list of graphviz-compatible colour
    specification strings.
rmsz : bool, optional (default False)
    If True, remove the width and height specifications from an SVG
    format output file so that the size scales properly when viewed
    in a web browser
prog : string, optional (default 'dot')
    Name of graphviz layout program to use.

Returns
-------
pgr : pygraphviz.AGraph
    Call graph of traced function calls
[ "Construct", "call", "graph" ]
train
https://github.com/bwohlberg/jonga/blob/1947d40d78d814fbdc3a973ceffec4b69b4623f2/jonga.py#L358-L466
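Putting the tracer together, a sketch only: the ``CallTracer`` constructor is assumed to accept the filter regexes that ``_trace`` reads as attributes (``srcmodflt``, ``dstmodflt``, ``grpflt``), together with a ``start`` method complementing ``stop``; consult the class itself for the exact signature:

.. code-block:: python

    import jonga

    ct = jonga.CallTracer(srcmodflt='^mypkg', dstmodflt='^mypkg',
                          grpflt=r'^[^\.]*\.[^\.]*')
    ct.start()
    run_workload()   # hypothetical code to be traced
    ct.stop()
    ct.graph('callgraph.svg', size='8,6', fntsz=9, rmsz=True)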
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.create_review_comment
def create_review_comment(self, body, commit_id, path, position): """Create a review comment on this pull request. All parameters are required by the GitHub API. :param str body: The comment text itself :param str commit_id: The SHA of the commit to comment on :param str path: The relative path of the file to comment on :param int position: The line index in the diff to comment on. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self._api) data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': int(position)} json = self._json(self._post(url, data=data), 201) return ReviewComment(json, self) if json else None
python
def create_review_comment(self, body, commit_id, path, position): """Create a review comment on this pull request. All parameters are required by the GitHub API. :param str body: The comment text itself :param str commit_id: The SHA of the commit to comment on :param str path: The relative path of the file to comment on :param int position: The line index in the diff to comment on. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self._api) data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': int(position)} json = self._json(self._post(url, data=data), 201) return ReviewComment(json, self) if json else None
[ "def", "create_review_comment", "(", "self", ",", "body", ",", "commit_id", ",", "path", ",", "position", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "_api", ")", "data", "=", "{", "'body'", ":", "body", ",", "'commit_id'", ":", "commit_id", ",", "'path'", ":", "path", ",", "'position'", ":", "int", "(", "position", ")", "}", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ",", "data", "=", "data", ")", ",", "201", ")", "return", "ReviewComment", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Create a review comment on this pull request. All parameters are required by the GitHub API. :param str body: The comment text itself :param str commit_id: The SHA of the commit to comment on :param str path: The relative path of the file to comment on :param int position: The line index in the diff to comment on. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment`
[ "Create", "a", "review", "comment", "on", "this", "pull", "request", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L222-L238
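A hedged usage sketch for create_review_comment; the token, repository, PR number, commit SHA, path, and position are placeholders, and the login/pull_request setup is github3's standard entry point rather than anything shown in this record:

import github3

gh = github3.login(token='<token>')        # placeholder token
pr = gh.pull_request('owner', 'repo', 42)  # placeholder repository and number
comment = pr.create_review_comment(
    body='Consider renaming this variable.',
    commit_id='6dcb09b5b57875f334f61aebed695e2e4193db5e',  # commit SHA placeholder
    path='src/module.py',  # path relative to the repository root
    position=4)            # line index in the diff
print(comment.id if comment else 'comment not created')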
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.diff
def diff(self): """Return the diff""" resp = self._get(self._api, headers={'Accept': 'application/vnd.github.diff'}) return resp.content if self._boolean(resp, 200, 404) else None
python
def diff(self): """Return the diff""" resp = self._get(self._api, headers={'Accept': 'application/vnd.github.diff'}) return resp.content if self._boolean(resp, 200, 404) else None
[ "def", "diff", "(", "self", ")", ":", "resp", "=", "self", ".", "_get", "(", "self", ".", "_api", ",", "headers", "=", "{", "'Accept'", ":", "'application/vnd.github.diff'", "}", ")", "return", "resp", ".", "content", "if", "self", ".", "_boolean", "(", "resp", ",", "200", ",", "404", ")", "else", "None" ]
Return the diff
[ "Return", "the", "diff" ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L240-L244
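A similar hedged sketch for diff; note that the method returns the raw bytes of the unified diff, or None on a 404:

import github3

gh = github3.login(token='<token>')        # placeholder setup as above
pr = gh.pull_request('owner', 'repo', 42)
data = pr.diff()
if data is not None:
    with open('pr42.diff', 'wb') as fd:    # bytes, so open in binary mode
        fd.write(data)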
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.is_merged
def is_merged(self): """Checks to see if the pull request was merged. :returns: bool """ url = self._build_url('merge', base_url=self._api) return self._boolean(self._get(url), 204, 404)
python
def is_merged(self): """Checks to see if the pull request was merged. :returns: bool """ url = self._build_url('merge', base_url=self._api) return self._boolean(self._get(url), 204, 404)
[ "def", "is_merged", "(", "self", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'merge'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_boolean", "(", "self", ".", "_get", "(", "url", ")", ",", "204", ",", "404", ")" ]
Checks to see if the pull request was merged. :returns: bool
[ "Checks", "to", "see", "if", "the", "pull", "request", "was", "merged", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L246-L252
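A hedged sketch for is_merged, which maps GitHub's 204/404 responses onto a boolean:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
print('merged' if pr.is_merged() else 'not merged')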
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.iter_comments
def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """ url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag)
python
def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """ url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag)
[ "def", "iter_comments", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "ReviewComment", ",", "etag", "=", "etag", ")" ]
Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s
[ "Iterate", "over", "the", "comments", "on", "this", "pull", "request", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L254-L264
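A hedged sketch for iter_comments; number=-1 (the default) yields every available review comment:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
for rc in pr.iter_comments():
    print(rc.id, rc.body[:60])             # id/body attributes of ReviewComment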
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.iter_files
def iter_files(self, number=-1, etag=None): """Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s """ url = self._build_url('files', base_url=self._api) return self._iter(int(number), url, PullFile, etag=etag)
python
def iter_files(self, number=-1, etag=None): """Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s """ url = self._build_url('files', base_url=self._api) return self._iter(int(number), url, PullFile, etag=etag)
[ "def", "iter_files", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'files'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "PullFile", ",", "etag", "=", "etag", ")" ]
Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s
[ "Iterate", "over", "the", "files", "associated", "with", "this", "pull", "request", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L278-L288
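A hedged sketch for iter_files; the filename/additions/deletions attributes are taken from github3's PullFile objects:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
for pf in pr.iter_files():
    print(pf.filename, pf.additions, pf.deletions)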
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.iter_issue_comments
def iter_issue_comments(self, number=-1, etag=None): """Iterate over the issue comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueComment <IssueComment>`\ s """ url = self._build_url(base_url=self.links['comments']) return self._iter(int(number), url, IssueComment, etag=etag)
python
def iter_issue_comments(self, number=-1, etag=None): """Iterate over the issue comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueComment <IssueComment>`\ s """ url = self._build_url(base_url=self.links['comments']) return self._iter(int(number), url, IssueComment, etag=etag)
[ "def", "iter_issue_comments", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "base_url", "=", "self", ".", "links", "[", "'comments'", "]", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "IssueComment", ",", "etag", "=", "etag", ")" ]
Iterate over the issue comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueComment <IssueComment>`\ s
[ "Iterate", "over", "the", "issue", "comments", "on", "this", "pull", "request", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L290-L300
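A hedged sketch for iter_issue_comments, which walks the issue-style conversation rather than the inline review comments:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
for ic in pr.iter_issue_comments():
    print(ic.user.login, ':', ic.body[:60])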
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.merge
def merge(self, commit_message='', sha=None): """Merge this pull request. :param str commit_message: (optional), message to be used for the merge commit :returns: bool """ parameters = {'commit_message': commit_message} if sha: parameters['sha'] = sha url = self._build_url('merge', base_url=self._api) json = self._json(self._put(url, data=dumps(parameters)), 200) self.merge_commit_sha = json['sha'] return json['merged']
python
def merge(self, commit_message='', sha=None): """Merge this pull request. :param str commit_message: (optional), message to be used for the merge commit :returns: bool """ parameters = {'commit_message': commit_message} if sha: parameters['sha'] = sha url = self._build_url('merge', base_url=self._api) json = self._json(self._put(url, data=dumps(parameters)), 200) self.merge_commit_sha = json['sha'] return json['merged']
[ "def", "merge", "(", "self", ",", "commit_message", "=", "''", ",", "sha", "=", "None", ")", ":", "parameters", "=", "{", "'commit_message'", ":", "commit_message", "}", "if", "sha", ":", "parameters", "[", "'sha'", "]", "=", "sha", "url", "=", "self", ".", "_build_url", "(", "'merge'", ",", "base_url", "=", "self", ".", "_api", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_put", "(", "url", ",", "data", "=", "dumps", "(", "parameters", ")", ")", ",", "200", ")", "self", ".", "merge_commit_sha", "=", "json", "[", "'sha'", "]", "return", "json", "[", "'merged'", "]" ]
Merge this pull request. :param str commit_message: (optional), message to be used for the merge commit :returns: bool
[ "Merge", "this", "pull", "request", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L303-L316
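A hedged sketch for merge; the optional sha argument (undocumented in the docstring above) pins the head commit that the pull request must match for the merge to proceed:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
if pr.merge(commit_message='Merge feature branch'):
    print('merge commit:', pr.merge_commit_sha)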
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.patch
def patch(self): """Return the patch""" resp = self._get(self._api, headers={'Accept': 'application/vnd.github.patch'}) return resp.content if self._boolean(resp, 200, 404) else None
python
def patch(self): """Return the patch""" resp = self._get(self._api, headers={'Accept': 'application/vnd.github.patch'}) return resp.content if self._boolean(resp, 200, 404) else None
[ "def", "patch", "(", "self", ")", ":", "resp", "=", "self", ".", "_get", "(", "self", ".", "_api", ",", "headers", "=", "{", "'Accept'", ":", "'application/vnd.github.patch'", "}", ")", "return", "resp", ".", "content", "if", "self", ".", "_boolean", "(", "resp", ",", "200", ",", "404", ")", "else", "None" ]
Return the patch
[ "Return", "the", "patch" ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L318-L322
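A hedged sketch for patch, symmetric with diff above; the result is raw bytes or None:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
data = pr.patch()
if data is not None:
    with open('pr42.patch', 'wb') as fd:
        fd.write(data)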
kislyuk/aegea
aegea/packages/github3/pulls.py
PullRequest.update
def update(self, title=None, body=None, state=None): """Update this pull request. :param str title: (optional), title of the pull :param str body: (optional), body of the pull request :param str state: (optional), ('open', 'closed') :returns: bool """ data = {'title': title, 'body': body, 'state': state} json = None self._remove_none(data) if data: json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
python
def update(self, title=None, body=None, state=None): """Update this pull request. :param str title: (optional), title of the pull :param str body: (optional), body of the pull request :param str state: (optional), ('open', 'closed') :returns: bool """ data = {'title': title, 'body': body, 'state': state} json = None self._remove_none(data) if data: json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
[ "def", "update", "(", "self", ",", "title", "=", "None", ",", "body", "=", "None", ",", "state", "=", "None", ")", ":", "data", "=", "{", "'title'", ":", "title", ",", "'body'", ":", "body", ",", "'state'", ":", "state", "}", "json", "=", "None", "self", ".", "_remove_none", "(", "data", ")", "if", "data", ":", "json", "=", "self", ".", "_json", "(", "self", ".", "_patch", "(", "self", ".", "_api", ",", "data", "=", "dumps", "(", "data", ")", ")", ",", "200", ")", "if", "json", ":", "self", ".", "_update_", "(", "json", ")", "return", "True", "return", "False" ]
Update this pull request. :param str title: (optional), title of the pull :param str body: (optional), body of the pull request :param str state: (optional), ('open', 'closed') :returns: bool
[ "Update", "this", "pull", "request", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L344-L362
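A hedged sketch for update; only the non-None fields are sent, and the object is refreshed in place on success:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
if pr.update(title='Clearer title', state='closed'):
    print(pr.title, pr.state)              # attributes updated from the response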
kislyuk/aegea
aegea/packages/github3/pulls.py
ReviewComment.reply
def reply(self, body): """Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self.pull_request_url) index = self._api.rfind('/') + 1 in_reply_to = self._api[index:] json = self._json(self._post(url, data={ 'body': body, 'in_reply_to': in_reply_to }), 201) return ReviewComment(json, self) if json else None
python
def reply(self, body): """Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self.pull_request_url) index = self._api.rfind('/') + 1 in_reply_to = self._api[index:] json = self._json(self._post(url, data={ 'body': body, 'in_reply_to': in_reply_to }), 201) return ReviewComment(json, self) if json else None
[ "def", "reply", "(", "self", ",", "body", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "pull_request_url", ")", "index", "=", "self", ".", "_api", ".", "rfind", "(", "'/'", ")", "+", "1", "in_reply_to", "=", "self", ".", "_api", "[", "index", ":", "]", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ",", "data", "=", "{", "'body'", ":", "body", ",", "'in_reply_to'", ":", "in_reply_to", "}", ")", ",", "201", ")", "return", "ReviewComment", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment`
[ "Reply", "to", "this", "review", "comment", "with", "a", "new", "review", "comment", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L414-L427
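A hedged sketch for reply; it grabs an existing review comment from the pull request and answers it in-thread:

import github3

gh = github3.login(token='<token>')        # placeholder setup
pr = gh.pull_request('owner', 'repo', 42)
rc = next(pr.iter_comments(), None)        # first review comment, if any
if rc is not None:
    follow_up = rc.reply('Agreed; fixed in the next commit.')
    print(follow_up.id if follow_up else 'reply failed')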
kislyuk/aegea
aegea/packages/github3/orgs.py
Team.add_member
def add_member(self, login): """Add ``login`` to this team. :returns: bool """ warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) url = self._build_url('members', login, base_url=self._api) return self._boolean(self._put(url), 204, 404)
python
def add_member(self, login): """Add ``login`` to this team. :returns: bool """ warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) url = self._build_url('members', login, base_url=self._api) return self._boolean(self._put(url), 204, 404)
[ "def", "add_member", "(", "self", ",", "login", ")", ":", "warnings", ".", "warn", "(", "'This is no longer supported by the GitHub API, see '", "'https://developer.github.com/changes/2014-09-23-one-more-week'", "'-before-the-add-team-member-api-breaking-change/'", ",", "DeprecationWarning", ")", "url", "=", "self", ".", "_build_url", "(", "'members'", ",", "login", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_boolean", "(", "self", ".", "_put", "(", "url", ")", ",", "204", ",", "404", ")" ]
Add ``login`` to this team. :returns: bool
[ "Add", "login", "to", "this", "team", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/orgs.py#L66-L77
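A hedged sketch for add_member; the organization/team lookup assumes github3's organization() and iter_teams() calls from this library version, and the method itself is deprecated per the warning above:

import github3

gh = github3.login(token='<token>')        # placeholder setup
org = gh.organization('my-org')            # placeholder organization
team = next(org.iter_teams(), None)        # assumed lookup; pick any team
if team is not None:
    ok = team.add_member('octocat')        # emits a DeprecationWarning
    print('added' if ok else 'not added')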
kislyuk/aegea
aegea/packages/github3/orgs.py
Team.add_repo
def add_repo(self, repo): """Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool """ url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._put(url), 204, 404)
python
def add_repo(self, repo): """Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool """ url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._put(url), 204, 404)
[ "def", "add_repo", "(", "self", ",", "repo", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'repos'", ",", "repo", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_boolean", "(", "self", ".", "_put", "(", "url", ")", ",", "204", ",", "404", ")" ]
Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool
[ "Add", "repo", "to", "this", "team", "." ]
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/orgs.py#L80-L87
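A hedged sketch for add_repo; note the repository must be given in 'user/repo' form:

import github3

gh = github3.login(token='<token>')        # placeholder setup
org = gh.organization('my-org')            # placeholder organization
team = next(org.iter_teams(), None)        # assumed lookup as above
if team is not None:
    ok = team.add_repo('my-org/my-repo')   # 'user/repo' form required
    print('repo added' if ok else 'add failed')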