| body (string, 26–98.2k chars) | body_hash (int64, -9.2e18 to 9.2e18) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 distinct value) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def load_gt_obj(self):
' load bbox ground truth from files either via the provided label directory or list of label files'
files = os.listdir(self.label_dir)
files = list(filter((lambda x: x.endswith(self.label_ext)), files))
if (len(files) == 0):
raise RuntimeError(('error: no label files found... | -6,172,390,386,588,916,000 | load bbox ground truth from files either via the provided label directory or list of label files | digits/extensions/data/objectDetection/utils.py | load_gt_obj | dcmartin/digits | python | def load_gt_obj(self):
' '
files = os.listdir(self.label_dir)
files = list(filter((lambda x: x.endswith(self.label_ext)), files))
if (len(files) == 0):
raise RuntimeError(('error: no label files found in %s' % self.label_dir))
for label_file in files:
objects_per_image = list()
... |
def generate_mesh(meshing_dir, params):
'\n Launch Mesh Generator to generate mesh.\n\n @param meshing_dir: the meshing directory\n @param params: the meshing parameters\n @return: the mesh generation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueErro... | -555,151,353,747,243,800 | Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is... | source/openwarpgui/openwarp/services.py | generate_mesh | rhydar/Test | python | def generate_mesh(meshing_dir, params):
'\n Launch Mesh Generator to generate mesh.\n\n @param meshing_dir: the meshing directory\n @param params: the meshing parameters\n @return: the mesh generation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueErro... |
def simulate(simulation_dir, params):
'\n Run simulation.\n\n @param simulation_dir: the simulation directory\n @param params: the simulation parameters\n @return: the simulation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parame... | 119,379,641,207,331,200 | Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
... | source/openwarpgui/openwarp/services.py | simulate | rhydar/Test | python | def simulate(simulation_dir, params):
'\n Run simulation.\n\n @param simulation_dir: the simulation directory\n @param params: the simulation parameters\n @return: the simulation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parame... |
def postprocess(simulation_dir, params):
'\n Run post-processing.\n\n @param simulation_dir: the simulation directory\n @param params: the post-processing parameters\n @return: the post-processing log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: i... | 5,305,011,815,453,659,000 | Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters ... | source/openwarpgui/openwarp/services.py | postprocess | rhydar/Test | python | def postprocess(simulation_dir, params):
'\n Run post-processing.\n\n @param simulation_dir: the simulation directory\n @param params: the post-processing parameters\n @return: the post-processing log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: i... |
def visualize(simulation_dir):
'\n Launch ParaView to visualize simulation results.\n\n @param simulation_dir: the simulation directory\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty\n @raise ServiceError: if error occurs d... | -6,515,608,174,183,623,000 | Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView | source/openwarpgui/openwarp/services.py | visualize | rhydar/Test | python | def visualize(simulation_dir):
'\n Launch ParaView to visualize simulation results.\n\n @param simulation_dir: the simulation directory\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty\n @raise ServiceError: if error occurs d... |
def prepare_paraview_script(script_path, files):
'\n Prepare a script to be run by ParaView from a template.\n\n @param script_path: path of the new script to create\n @param files: a list of data files path\n @raise Exception: to its caller if any error occurs\n '
with open(PARAVIEW_SCRIPT_TEMPL... | 8,403,920,437,899,173,000 | Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs | source/openwarpgui/openwarp/services.py | prepare_paraview_script | rhydar/Test | python | def prepare_paraview_script(script_path, files):
'\n Prepare a script to be run by ParaView from a template.\n\n @param script_path: path of the new script to create\n @param files: a list of data files path\n @raise Exception: to its caller if any error occurs\n '
with open(PARAVIEW_SCRIPT_TEMPL... |
def wrapper_io(func, fd, args, return_dict):
'\n Run a function while redirecting its output to a file descriptor\n Args:\n func: A python function to run\n fd: a file descriptor\n args: A tuple containing argument for the function\n return_dict: Dictionary where to put the result ... | 5,322,863,883,814,633,000 | Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing argument for the function
return_dict: Dictionary where to put the result of the function | source/openwarpgui/openwarp/services.py | wrapper_io | rhydar/Test | python | def wrapper_io(func, fd, args, return_dict):
'\n Run a function while redirecting its output to a file descriptor\n Args:\n func: A python function to run\n fd: a file descriptor\n args: A tuple containing argument for the function\n return_dict: Dictionary where to put the result ... |
def run_thread(func, args, fd):
'\n Run a python function in a thread and wait for it to complete.\n Redirect its output to fd\n Args:\n func: A python function to run\n args: A tuple containing argument for the function\n fd: a file descriptor\n '
manager = Manager()
return... | 7,948,268,067,498,178,000 | Run a python function in a thread and wait for it to complete.
Redirect its output to fd
Args:
func: A python function to run
args: A tuple containing argument for the function
fd: a file descriptor | source/openwarpgui/openwarp/services.py | run_thread | rhydar/Test | python | def run_thread(func, args, fd):
'\n Run a python function in a thread and wait for it to complete.\n Redirect its output to fd\n Args:\n func: A python function to run\n args: A tuple containing argument for the function\n fd: a file descriptor\n '
manager = Manager()
return... |
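The `wrapper_io` / `run_thread` records above describe OpenWARP helpers that run a function while redirecting its output to a file and collecting the return value in a shared dictionary (the docstring says "thread", but the visible code builds a `multiprocessing.Manager`). Below is a minimal, hedged sketch of that pattern; the names `_wrapper_io`, `run_redirected`, `_work` and the choice to pass a log-file path instead of an open descriptor are illustrative assumptions, not the project's actual API.

```python
import sys
from multiprocessing import Manager, Process


def _wrapper_io(func, log_path, args, return_dict):
    """Run func(*args) with stdout/stderr redirected to log_path; store the result."""
    with open(log_path, 'w') as fd:
        sys.stdout = sys.stderr = fd          # redirect the child's output
        return_dict['value'] = func(*args)


def run_redirected(func, args, log_path):
    """Run func in a child process, wait for it to finish, and return its result."""
    manager = Manager()
    return_dict = manager.dict()              # shared between parent and child
    proc = Process(target=_wrapper_io, args=(func, log_path, args, return_dict))
    proc.start()
    proc.join()
    return return_dict.get('value')


def _work(x):
    print('processing', x)                    # goes to the log file, not the console
    return x * 2


if __name__ == '__main__':
    print(run_redirected(_work, (21,), 'run.log'))   # prints 42
```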
def writeline_if_not_none(fout, data):
'\n Write one line to the specified file if data is not None.\n\n @param fout: the file object to write line in\n @param data: the data to write as line\n '
if (data is not None):
fout.write((str(data) + '\n')) | -718,766,003,065,456,600 | Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line | source/openwarpgui/openwarp/services.py | writeline_if_not_none | rhydar/Test | python | def writeline_if_not_none(fout, data):
'\n Write one line to the specified file if data is not None.\n\n @param fout: the file object to write line in\n @param data: the data to write as line\n '
if (data is not None):
fout.write((str(data) + '\n')) |
def prepare_dir(prefix):
'\n Prepare a directory, the directory will be a sub-directory of USER_DATA_DIRECTORY with current timestamp\n prefixed given prefix as the directory name.\n\n @param prefix: the directory prefix\n @return: the meshing/simulation directory full path\n @raise TypeError: if any... | -1,163,555,207,973,630,700 | Prepare a directory, the directory will be a sub-directory of USER_DATA_DIRECTORY with current timestamp
prefixed given prefix as the directory name.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueErro... | source/openwarpgui/openwarp/services.py | prepare_dir | rhydar/Test | python | def prepare_dir(prefix):
'\n Prepare a directory, the directory will be a sub-directory of USER_DATA_DIRECTORY with current timestamp\n prefixed given prefix as the directory name.\n\n @param prefix: the directory prefix\n @return: the meshing/simulation directory full path\n @raise TypeError: if any... |
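The `prepare_dir` record above describes creating a timestamped working directory under a configured data directory. A small sketch of that behaviour follows; `base_dir='user_data'` stands in for the real `USER_DATA_DIRECTORY` setting and the timestamp format is an assumption.

```python
import os
import time


def prepare_dir_sketch(prefix, base_dir='user_data'):
    """Create and return a sub-directory of base_dir named '<prefix><timestamp>'."""
    if not isinstance(prefix, str):
        raise TypeError('prefix must be a string')
    if not prefix:
        raise ValueError('prefix must not be empty')
    path = os.path.join(base_dir, prefix + time.strftime('%Y%m%d%H%M%S'))
    os.makedirs(path, exist_ok=True)
    return path


print(prepare_dir_sketch('simulation_'))   # e.g. user_data/simulation_20240101120000
```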
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
'\n Initialize\n Args:\n self: The class itself\n stdout: the descriptor or file name where to redirect stdout\n stdout: the descriptor or file name where to redirect stdout\n mode: the outp... | 7,559,297,123,415,256,000 | Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stdout: the descriptor or file name where to redirect stdout
mode: the output descriptor or file mode | source/openwarpgui/openwarp/services.py | __init__ | rhydar/Test | python | def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
'\n Initialize\n Args:\n self: The class itself\n stdout: the descriptor or file name where to redirect stdout\n stdout: the descriptor or file name where to redirect stdout\n mode: the outp... |
def __enter__(self):
'\n Enter the context\n Args:\n self: The class itself\n '
import sys
self.sys = sys
self.saved_streams = saved_streams = (sys.__stdout__, sys.__stderr__)
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
... | 2,806,386,025,515,811,000 | Enter the context
Args:
self: The class itself | source/openwarpgui/openwarp/services.py | __enter__ | rhydar/Test | python | def __enter__(self):
'\n Enter the context\n Args:\n self: The class itself\n '
import sys
self.sys = sys
self.saved_streams = saved_streams = (sys.__stdout__, sys.__stderr__)
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
... |
def __exit__(self, *args):
'\n Exit the context\n Args:\n self: The class itself\n args: other arguments\n '
sys = self.sys
for s in self.saved_streams:
s.flush()
map(os.dup2, self.saved_fds, self.fds)
(sys.stdout, sys.stderr) = self.saved_streams
... | 3,997,459,935,956,700,000 | Exit the context
Args:
self: The class itself
args: other arguments | source/openwarpgui/openwarp/services.py | __exit__ | rhydar/Test | python | def __exit__(self, *args):
'\n Exit the context\n Args:\n self: The class itself\n args: other arguments\n '
sys = self.sys
for s in self.saved_streams:
s.flush()
map(os.dup2, self.saved_fds, self.fds)
(sys.stdout, sys.stderr) = self.saved_streams
... |
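The `__init__`/`__enter__`/`__exit__` records above belong to an output-redirection context manager that duplicates the stdout/stderr file descriptors, points them at target files, and restores them on exit. The following self-contained sketch shows the same descriptor-level trick; the class name `RedirectFds` and the log-file names are illustrative, not the project's own.

```python
import os
import sys


class RedirectFds:
    """Redirect stdout/stderr at the file-descriptor level inside a with-block."""

    def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
        self._targets = (stdout, stderr)
        self._mode = mode

    def __enter__(self):
        self._streams = (sys.__stdout__, sys.__stderr__)
        self._fds = [s.fileno() for s in self._streams]        # usually 1 and 2
        self._saved_fds = [os.dup(fd) for fd in self._fds]     # copies used to restore
        self._files = [open(t, self._mode) for t in self._targets]
        for f, fd in zip(self._files, self._fds):
            os.dup2(f.fileno(), fd)                            # point fds 1/2 at the files
        return self

    def __exit__(self, *args):
        for s in self._streams:
            s.flush()                                          # push buffered text to the logs
        for saved, fd in zip(self._saved_fds, self._fds):
            os.dup2(saved, fd)                                 # restore the original fds
            os.close(saved)
        for f in self._files:
            f.close()


if __name__ == '__main__':
    with RedirectFds(stdout='out.log', stderr='err.log'):
        print('this line goes to out.log')
    print('back on the console')
```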
def get_token(self):
'\n Generates an Azure SAS token for pre-authorizing a file upload.\n\n Returns a tuple in the following format: (token_dict, object_name), where\n - token_dict has a `token` key which contains the SAS token as a string\n - object_name is a string\n '
... | 4,943,684,934,307,843,000 | Generates an Azure SAS token for pre-authorizing a file upload.
Returns a tuple in the following format: (token_dict, object_name), where
- token_dict has a `token` key which contains the SAS token as a string
- object_name is a string | atst/domain/csp/file_uploads.py | get_token | philip-dds/atst | python | def get_token(self):
'\n Generates an Azure SAS token for pre-authorizing a file upload.\n\n Returns a tuple in the following format: (token_dict, object_name), where\n - token_dict has a `token` key which contains the SAS token as a string\n - object_name is a string\n '
... |
def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n ... | 8,845,800,174,379,827,000 | args:
root - path to the lasot dataset.
image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
is used by default.
vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], th... | Stark-main/external/AR/ltr/dataset/lasot.py | __init__ | 2021-DGSW-Ensemble/Ensemble-AI | python | def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n ... |
def __init__(self, args, base_dir=Path.db_root_dir('pascal'), split='train'):
'\n :param base_dir: path to VOC dataset directory\n :param split: train/val\n :param transform: transform to apply\n '
super().__init__()
self._base_dir = base_dir
self._image_dir = os.path.join(se... | -1,031,724,533,508,999,200 | :param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply | dataloaders/datasets/pascal.py | __init__ | ChenyanWu/seg_super_pixel | python | def __init__(self, args, base_dir=Path.db_root_dir('pascal'), split='train'):
'\n :param base_dir: path to VOC dataset directory\n :param split: train/val\n :param transform: transform to apply\n '
super().__init__()
self._base_dir = base_dir
self._image_dir = os.path.join(se... |
def image_psf(image, stars, size=15, normalize=False, return_cutouts=False):
'\n Get global psf from image using photutils routines\n\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around sta... | 6,398,578,586,264,584,000 | Get global psf from image using photutils routines
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels)
normalize: bool, optional
weather to normalize the cutout, default is False
Returns
-------
np.ndarray o... | prose/blocks/psf.py | image_psf | lgrcia/prose | python | def image_psf(image, stars, size=15, normalize=False, return_cutouts=False):
'\n Get global psf from image using photutils routines\n\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around sta... |
def cutouts(image, stars, size=15):
'Custom version to extract stars cutouts\n\n Parameters\n ----------\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around stars (in pixels), by default... | -796,233,181,387,293,200 | Custom version to extract stars cutouts
Parameters
----------
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels), by default 15
Returns
-------
np.ndarray of shape (size, size) | prose/blocks/psf.py | cutouts | lgrcia/prose | python | def cutouts(image, stars, size=15):
'Custom version to extract stars cutouts\n\n Parameters\n ----------\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around stars (in pixels), by default... |
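The `cutouts` record above describes slicing fixed-size stamps around star positions, but the body is truncated. A numpy-only sketch of the idea is below; prose's real routine may use astropy/photutils helpers, and its handling of stars near the image edge may differ (this sketch simply skips them).

```python
import numpy as np


def star_cutouts(image, stars, size=15):
    """Return size x size cutouts centred on each (x, y) star position."""
    half = size // 2
    cuts = []
    for x, y in np.asarray(stars, dtype=float):
        x, y = int(round(x)), int(round(y))
        # numpy images are indexed [row, column] = [y, x]
        if half <= y < image.shape[0] - half and half <= x < image.shape[1] - half:
            cuts.append(image[y - half:y + half + 1, x - half:x + half + 1])
    return np.array(cuts)


rng = np.random.default_rng(0)
img = rng.normal(size=(100, 100))
print(star_cutouts(img, [(50, 50), (20, 80)], size=15).shape)   # (2, 15, 15)
```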
def moments(data):
'Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution by calculating its\n moments '
height = data.max()
background = data.min()
data = (data - np.min(data))
total = data.sum()
(x, y) = np.indices(data.shape)
x = ((x * data).sum() /... | -6,192,023,492,880,741,000 | Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments | prose/blocks/psf.py | moments | lgrcia/prose | python | def moments(data):
'Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution by calculating its\n moments '
height = data.max()
background = data.min()
data = (data - np.min(data))
total = data.sum()
(x, y) = np.indices(data.shape)
x = ((x * data).sum() /... |
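The `moments` record is cut off after the centroid step. The sketch below completes the estimate with the classic moment-based recipe (second moments along the row/column through the centroid give the widths); the exact width computation in prose may differ.

```python
import numpy as np


def moments_sketch(data):
    """Estimate (height, x, y, width_x, width_y) of a 2-D blob from its image moments."""
    height = data.max()
    data = data - data.min()                      # remove the background level
    total = data.sum()
    X, Y = np.indices(data.shape)
    x = (X * data).sum() / total                  # centroid along axis 0 (rows)
    y = (Y * data).sum() / total                  # centroid along axis 1 (columns)
    col = data[:, int(y)]
    width_x = np.sqrt(np.abs((np.arange(col.size) - x) ** 2 * col).sum() / col.sum())
    row = data[int(x), :]
    width_y = np.sqrt(np.abs((np.arange(row.size) - y) ** 2 * row).sum() / row.sum())
    return height, x, y, width_x, width_y


yy, xx = np.mgrid[0:31, 0:31]
blob = np.exp(-((xx - 15) ** 2 + (yy - 10) ** 2) / (2 * 3.0 ** 2))
print(moments_sketch(blob))   # height ~1, centroid near (10, 15), widths near 3
```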
def _correct_folder(folder: str) -> str:
"Ensures the folder follows a standard.\n\n Pathlib.parent in the root folder results in '.', whereas in other places\n we should use '' for the root folder. This function makes sure the root\n folder is always empty string.\n\n Args:\n folder: the folder to be correc... | 5,241,296,615,931,418,000 | Ensures the folder follows a standard.
Pathlib.parent in the root folder results in '.', whereas in other places
we should use '' for the root folder. This function makes sure the root
folder is always empty string.
Args:
folder: the folder to be corrected.
Returns:
The corrected folder. | tensorflow_datasets/core/github_api/github_path.py | _correct_folder | YangDong2002/datasets | python | def _correct_folder(folder: str) -> str:
"Ensures the folder follows a standard.\n\n Pathlib.parent in the root folder results in '.', whereas in other places\n we should use for the root folder. This function makes sure the root\n folder is always empty string.\n\n Args:\n folder: the folder to be correcte... |
def _parse_github_path(path: str) -> Tuple[(str, str, str)]:
'Parse the absolute github path.\n\n Args:\n path: The full github path.\n\n Returns:\n repo: The repository identifiant.\n branch: Repository branch.\n subpath: The inner path.\n\n Raises:\n ValueError: If the path is invalid\n '
e... | 7,302,151,068,288,946,000 | Parse the absolute github path.
Args:
path: The full github path.
Returns:
repo: The repository identifiant.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid | tensorflow_datasets/core/github_api/github_path.py | _parse_github_path | YangDong2002/datasets | python | def _parse_github_path(path: str) -> Tuple[(str, str, str)]:
'Parse the absolute github path.\n\n Args:\n path: The full github path.\n\n Returns:\n repo: The repository identifiant.\n branch: Repository branch.\n subpath: The inner path.\n\n Raises:\n ValueError: If the path is invalid\n '
e... |
def query(self, url: str) -> JsonValue:
'Launches a Github API query and returns the result.'
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if (resp.status_code != 200):
raise FileNotFoundError(f'''Request fai... | 2,435,902,409,758,191,000 | Launches a Github API query and returns the result. | tensorflow_datasets/core/github_api/github_path.py | query | YangDong2002/datasets | python | def query(self, url: str) -> JsonValue:
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if (resp.status_code != 200):
raise FileNotFoundError(f'''Request failed:
Request: {url}
Error: {resp.status_code}
Reason... |
def query_tree(self, repo: str, branch: str) -> JsonValue:
'Queries a repository tree.\n\n See https://docs.github.com/en/rest/reference/git#trees\n\n Args:\n repo: the repository\n branch: the branch for which to get the tree\n\n Returns:\n JSON dict with the tree.\n '
url = f'https:... | -6,298,859,450,915,724,000 | Queries a repository tree.
See https://docs.github.com/en/rest/reference/git#trees
Args:
repo: the repository
branch: the branch for which to get the tree
Returns:
JSON dict with the tree. | tensorflow_datasets/core/github_api/github_path.py | query_tree | YangDong2002/datasets | python | def query_tree(self, repo: str, branch: str) -> JsonValue:
'Queries a repository tree.\n\n See https://docs.github.com/en/rest/reference/git#trees\n\n Args:\n repo: the repository\n branch: the branch for which to get the tree\n\n Returns:\n JSON dict with the tree.\n '
url = f'https:... |
@classmethod
def from_json(cls, value) -> '_GithubTree':
'Parses a GithubTree from the given JSON.'
if ((not isinstance(value, dict)) or ('tree' not in value)):
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[(str, Set[str])] = {}
for element in v... | 4,277,081,197,266,624,500 | Parses a GithubTree from the given JSON. | tensorflow_datasets/core/github_api/github_path.py | from_json | YangDong2002/datasets | python | @classmethod
def from_json(cls, value) -> '_GithubTree':
if ((not isinstance(value, dict)) or ('tree' not in value)):
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[(str, Set[str])] = {}
for element in value['tree']:
github_element = _Gi... |
@staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
'Factory which caches the entire Github tree.'
tree_json = GithubApi().query_tree(repo, branch)
assert (not tree_json.get('truncated', False))
return _GithubTree.from_json(tree_json) | -7,261,570,799,538,644,000 | Factory which caches the entire Github tree. | tensorflow_datasets/core/github_api/github_path.py | from_cache | YangDong2002/datasets | python | @staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
tree_json = GithubApi().query_tree(repo, branch)
assert (not tree_json.get('truncated', False))
return _GithubTree.from_json(tree_json) |
@classmethod
def from_repo(cls, repo: str, branch: str='master') -> 'GithubPath':
"Factory to creates a GithubPath from a repo name.\n\n Args:\n repo: Repo name (e.g. `tensorflow/datasets`)\n branch: Branch name (e.g. `master`, 'v1.2.0', 'EXAMPLE_KEY'). Default to\n master.\n\n Returns:\n ... | 6,186,761,111,058,657,000 | Factory to creates a GithubPath from a repo name.
Args:
repo: Repo name (e.g. `tensorflow/datasets`)
branch: Branch name (e.g. `master`, 'v1.2.0', 'EXAMPLE_KEY'). Default to
master.
Returns:
github_path: The repository root dir at head | tensorflow_datasets/core/github_api/github_path.py | from_repo | YangDong2002/datasets | python | @classmethod
def from_repo(cls, repo: str, branch: str='master') -> 'GithubPath':
"Factory to creates a GithubPath from a repo name.\n\n Args:\n repo: Repo name (e.g. `tensorflow/datasets`)\n branch: Branch name (e.g. `master`, 'v1.2.0', 'EXAMPLE_KEY'). Default to\n master.\n\n Returns:\n ... |
@property
def subpath(self) -> str:
'The inner path (e.g. `core/__init__.py`).'
return self._metadata.subpath | -8,259,573,421,062,299,000 | The inner path (e.g. `core/__init__.py`). | tensorflow_datasets/core/github_api/github_path.py | subpath | YangDong2002/datasets | python | @property
def subpath(self) -> str:
return self._metadata.subpath |
@property
def repo(self) -> str:
'The repository identifier (e.g. `tensorflow/datasets`).'
return self._metadata.repo | -4,497,430,041,800,673,000 | The repository identifier (e.g. `tensorflow/datasets`). | tensorflow_datasets/core/github_api/github_path.py | repo | YangDong2002/datasets | python | @property
def repo(self) -> str:
return self._metadata.repo |
@property
def branch(self) -> str:
'The branch (e.g. `master`, `v2`, `43bbad116df`,...).'
return self._metadata.branch | 6,229,227,912,246,695,000 | The branch (e.g. `master`, `v2`, `43bbad116df`,...). | tensorflow_datasets/core/github_api/github_path.py | branch | YangDong2002/datasets | python | @property
def branch(self) -> str:
return self._metadata.branch |
def as_raw_url(self) -> str:
'Returns the raw content url (https://raw.githubusercontent.com).'
return f'https://raw.githubusercontent.com/{self.repo}/{self.branch}/{self.subpath}' | -6,023,169,806,691,042,000 | Returns the raw content url (https://raw.githubusercontent.com). | tensorflow_datasets/core/github_api/github_path.py | as_raw_url | YangDong2002/datasets | python | def as_raw_url(self) -> str:
return f'https://raw.githubusercontent.com/{self.repo}/{self.branch}/{self.subpath}' |
def as_human_friendly_url(self) -> str:
'Returns the human friendly url.'
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}' | -315,517,247,223,268,350 | Returns the human friendly url. | tensorflow_datasets/core/github_api/github_path.py | as_human_friendly_url | YangDong2002/datasets | python | def as_human_friendly_url(self) -> str:
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}' |
def iterdir(self) -> Iterator['GithubPath']:
'Yields the sub-paths.'
if (not self.is_dir()):
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
(yield (self / filename.name)) | -7,273,975,935,590,726,000 | Yields the sub-paths. | tensorflow_datasets/core/github_api/github_path.py | iterdir | YangDong2002/datasets | python | def iterdir(self) -> Iterator['GithubPath']:
if (not self.is_dir()):
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
(yield (self / filename.name)) |
def is_dir(self) -> bool:
'Returns True if the path is a directory or submodule.'
return self.github_tree.is_folder(self.subpath) | -7,686,961,024,988,698,000 | Returns True if the path is a directory or submodule. | tensorflow_datasets/core/github_api/github_path.py | is_dir | YangDong2002/datasets | python | def is_dir(self) -> bool:
return self.github_tree.is_folder(self.subpath) |
def is_file(self) -> bool:
'Returns True if the path is a file.'
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath)) | -4,208,411,006,704,021,500 | Returns True if the path is a file. | tensorflow_datasets/core/github_api/github_path.py | is_file | YangDong2002/datasets | python | def is_file(self) -> bool:
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath)) |
def exists(self) -> bool:
'Returns True if the path exists.'
return (self.is_dir() or self.is_file()) | 3,038,122,737,878,214,700 | Returns True if the path exists. | tensorflow_datasets/core/github_api/github_path.py | exists | YangDong2002/datasets | python | def exists(self) -> bool:
return (self.is_dir() or self.is_file()) |
def read_bytes(self) -> bytes:
'Returns the file content as bytes.'
url = self.as_raw_url()
return get_content(url) | -303,842,329,040,540,740 | Returns the file content as bytes. | tensorflow_datasets/core/github_api/github_path.py | read_bytes | YangDong2002/datasets | python | def read_bytes(self) -> bytes:
url = self.as_raw_url()
return get_content(url) |
def read_text(self, encoding: Optional[str]=None) -> str:
'Returns the file content as string.'
return self.read_bytes().decode(encoding=(encoding or 'utf-8')) | -1,727,593,445,862,952,400 | Returns the file content as string. | tensorflow_datasets/core/github_api/github_path.py | read_text | YangDong2002/datasets | python | def read_text(self, encoding: Optional[str]=None) -> str:
return self.read_bytes().decode(encoding=(encoding or 'utf-8')) |
def copy(self, dst: utils.PathLike, overwrite: bool=False) -> utils.ReadWritePath:
'Copy the current file to the given destination.\n\n Args:\n dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)\n overwrite: Whether the file should be overwritten or not\n\n Returns:\n Th... | -5,535,817,320,094,609,000 | Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The new created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists. | tensorflow_datasets/core/github_api/github_path.py | copy | YangDong2002/datasets | python | def copy(self, dst: utils.PathLike, overwrite: bool=False) -> utils.ReadWritePath:
'Copy the current file to the given destination.\n\n Args:\n dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)\n overwrite: Whether the file should be overwritten or not\n\n Returns:\n Th... |
def glance_detail(request):
'\n OpenStack specific action to get image details from Glance\n :param request: HTTPRequest\n :return: rendered HTML\n '
required_fields = set(['imageId'])
if (not required_fields.issubset(request.POST)):
return render(request, 'ajax/ajaxError.html', {'error'... | 1,670,277,878,723,460,400 | OpenStack specific action to get image details from Glance
:param request: HTTPRequest
:return: rendered HTML | images/views.py | glance_detail | Juniper/wistar | python | def glance_detail(request):
'\n OpenStack specific action to get image details from Glance\n :param request: HTTPRequest\n :return: rendered HTML\n '
required_fields = set(['imageId'])
if (not required_fields.issubset(request.POST)):
return render(request, 'ajax/ajaxError.html', {'error'... |
def import_from_glance(request, glance_id):
'\n Creates a local db entry for the glance image\n Everything in Wistar depends on a db entry in the Images table\n If you have an existing openstack cluster, you may want to import those\n images here without having to physically copy the images to local dis... | 9,054,538,078,449,184,000 | Creates a local db entry for the glance image
Everything in Wistar depends on a db entry in the Images table
If you have an existing openstack cluster, you may want to import those
images here without having to physically copy the images to local disk
:param request: HTTPRequest object
:param glance_id: id of the glanc... | images/views.py | import_from_glance | Juniper/wistar | python | def import_from_glance(request, glance_id):
'\n Creates a local db entry for the glance image\n Everything in Wistar depends on a db entry in the Images table\n If you have an existing openstack cluster, you may want to import those\n images here without having to physically copy the images to local dis... |
def copy_bits(self, dest):
'copies the bits of the image to the provided destination address'
desc = _NuiSurfaceDesc()
PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc))
rect = _NuiLockedRect()
PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0)
ctypes.memmove(dest, rect.bits, (desc.... | -4,890,728,892,160,774,000 | copies the bits of the image to the provided destination address | pykinect/nui/structs.py | copy_bits | howieraem/KinectActionDetection | python | def copy_bits(self, dest):
desc = _NuiSurfaceDesc()
PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc))
rect = _NuiLockedRect()
PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0)
ctypes.memmove(dest, rect.bits, (desc.height * rect.pitch))
PlanarImage._UnlockRect(self, 0) |
def calculate_bone_orientations(self):
'Calculate bone orientations for a skeleton.\n\n The function calculates hierarchical and absolute joint angles for the skeleton, which can\n be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy,\n and describes an ... | 2,338,955,707,845,455,400 | Calculate bone orientations for a skeleton.
The function calculates hierarchical and absolute joint angles for the skeleton, which can
be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy,
and describes an absolute rotation in the right-hand camera coordinate system. All other
... | pykinect/nui/structs.py | calculate_bone_orientations | howieraem/KinectActionDetection | python | def calculate_bone_orientations(self):
'Calculate bone orientations for a skeleton.\n\n The function calculates hierarchical and absolute joint angles for the skeleton, which can\n be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy,\n and describes an ... |
@pytest.mark.parametrize('query_results, results_count', [([], 0), (MOCK_QUERY_RESULTS, len(MOCK_QUERY_RESULTS)), (MOCK_QUERY_RESULTS[:2], 2)])
def test_formats_count(app_instance, mocker, query_results, results_count):
'Test that results match high-level expectations.'
query = mocker.patch('AIPscan.Data.report... | -1,554,558,207,751,655,700 | Test that results match high-level expectations. | AIPscan/Data/tests/test_formats_count.py | test_formats_count | artefactual-labs/AIPscan | python | @pytest.mark.parametrize('query_results, results_count', [([], 0), (MOCK_QUERY_RESULTS, len(MOCK_QUERY_RESULTS)), (MOCK_QUERY_RESULTS[:2], 2)])
def test_formats_count(app_instance, mocker, query_results, results_count):
query = mocker.patch('AIPscan.Data.report_data._formats_count_query')
query.return_valu... |
@pytest.mark.parametrize('test_format', [mock_result for mock_result in MOCK_QUERY_RESULTS])
def test_formats_count_elements(app_instance, mocker, test_format):
'Test that structure of versions data matches expectations.'
mock_query = mocker.patch('AIPscan.Data.report_data._formats_count_query')
mock_query.... | 6,305,090,356,362,009,000 | Test that structure of versions data matches expectations. | AIPscan/Data/tests/test_formats_count.py | test_formats_count_elements | artefactual-labs/AIPscan | python | @pytest.mark.parametrize('test_format', [mock_result for mock_result in MOCK_QUERY_RESULTS])
def test_formats_count_elements(app_instance, mocker, test_format):
mock_query = mocker.patch('AIPscan.Data.report_data._formats_count_query')
mock_query.return_value = [test_format]
mock_get_ss_name = mocker.p... |
@pytest.mark.parametrize('start_date, end_date, format_count, total_file_count, total_file_size', [(None, None, 2, 3, TOTAL_FILE_SIZE), (DATE_BEFORE_AIP_1, None, 2, 3, TOTAL_FILE_SIZE), (AIP_1_CREATION_DATE, None, 2, 3, TOTAL_FILE_SIZE), (DATE_AFTER_AIP_1, None, 2, 2, JPEG_1_02_FILE_SIZE), (None, DATE_BEFORE_AIP_2, 1, ... | -4,245,950,110,821,830,000 | Test that content of response matches expectations.
This integration test uses a pre-populated fixture to verify that
the database access layer of our endpoint returns what we expect. | AIPscan/Data/tests/test_formats_count.py | test_formats_count_contents | artefactual-labs/AIPscan | python | @pytest.mark.parametrize('start_date, end_date, format_count, total_file_count, total_file_size', [(None, None, 2, 3, TOTAL_FILE_SIZE), (DATE_BEFORE_AIP_1, None, 2, 3, TOTAL_FILE_SIZE), (AIP_1_CREATION_DATE, None, 2, 3, TOTAL_FILE_SIZE), (DATE_AFTER_AIP_1, None, 2, 2, JPEG_1_02_FILE_SIZE), (None, DATE_BEFORE_AIP_2, 1, ... |
def __init__(self, ct_logs, db, cert_db, temp_db_factory, monitor_state_dir, agent=None, state_keeper_class=None):
'Initialize from a CtLogs proto.'
threading.Thread.__init__(self)
self.__monitors = []
self.__db = db
if (not agent):
agent = twisted_client.Agent(reactor)
if (not state_kee... | -455,337,136,996,825,500 | Initialize from a CtLogs proto. | vendor/github.com/google/certificate-transparency/python/ct/client/prober.py | __init__ | DavadDi/archon | python | def __init__(self, ct_logs, db, cert_db, temp_db_factory, monitor_state_dir, agent=None, state_keeper_class=None):
threading.Thread.__init__(self)
self.__monitors = []
self.__db = db
if (not agent):
agent = twisted_client.Agent(reactor)
if (not state_keeper_class):
state_keeper_... |
def prettify(self, elem):
'\n Return a pretty-printed XML string for the Element.\n '
rough_string = ElementTree.tostring(elem, 'utf8')
root = etree.fromstring(rough_string)
return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(' '.encode(), '\t'.encode())
... | -2,985,847,206,361,713,700 | Return a pretty-printed XML string for the Element. | libs/pascal_voc_io.py | prettify | yuxluo/umtri_video_label | python | def prettify(self, elem):
'\n \n '
rough_string = ElementTree.tostring(elem, 'utf8')
root = etree.fromstring(rough_string)
return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(' '.encode(), '\t'.encode())
'reparsed = minidom.parseString(rough_string)\n ... |
def genXML(self):
'\n Return XML root\n '
if ((self.filename is None) or (self.foldername is None) or (self.imgSize is None)):
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'data_set')
folder.text =... | -2,343,235,265,172,610,600 | Return XML root | libs/pascal_voc_io.py | genXML | yuxluo/umtri_video_label | python | def genXML(self):
'\n \n '
if ((self.filename is None) or (self.foldername is None) or (self.imgSize is None)):
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'data_set')
folder.text = self.foldernam... |
def log_to_dataframe(log_file, regex, headers, logformat):
' Function to transform log file to dataframe '
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [... | 2,983,973,701,135,370,000 | Function to transform log file to dataframe | logparser/SLCT/SLCT.py | log_to_dataframe | LogAnalysisTeam/logparser | python | def log_to_dataframe(log_file, regex, headers, logformat):
' '
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
... |
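The `log_to_dataframe` record above shows lines being matched against a pre-built regex and their named groups collected; the tail of the function is truncated. A hedged sketch follows, assuming pandas and skipping lines that fail to match, as the original's try/except suggests. It pairs with the regex builder sketched after the next record.

```python
import pandas as pd


def log_to_dataframe_sketch(log_path, regex, headers):
    """Parse each log line with a compiled named-group regex into a DataFrame."""
    rows = []
    with open(log_path, 'r') as fin:
        for line in fin:
            match = regex.search(line.strip())
            if match is None:
                continue                               # unmatched lines are skipped
            rows.append([match.group(h) for h in headers])
    df = pd.DataFrame(rows, columns=headers)
    df.insert(0, 'LineId', range(1, len(df) + 1))      # 1-based line identifier
    return df
```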
def generate_logformat_regex(logformat):
' \n Function to generate regular expression to split log messages\n '
headers = []
splitters = re.split('(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if ((k % 2) == 0):
splitter = re.sub(' +', '\\s+', splitters... | 76,824,784,785,815,180 | Function to generate regular expression to split log messages | logparser/SLCT/SLCT.py | generate_logformat_regex | LogAnalysisTeam/logparser | python | def generate_logformat_regex(logformat):
' \n \n '
headers = []
splitters = re.split('(<[^<>]+>)', logformat)
regex =
for k in range(len(splitters)):
if ((k % 2) == 0):
splitter = re.sub(' +', '\\s+', splitters[k])
regex += splitter
else:
he... |
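The `generate_logformat_regex` record is truncated mid-loop. A runnable sketch of the same construction is below: literal text in the format string keeps flexible whitespace, and each `<Header>` placeholder becomes a named capture group (the doubled backslash in the replacement string is needed under Python 3's stricter escape handling).

```python
import re


def logformat_regex_sketch(logformat):
    """Build (headers, compiled_regex) from a format such as '<Date> <Time> <Content>'."""
    headers = []
    splitters = re.split(r'(<[^<>]+>)', logformat)
    regex = ''
    for k, part in enumerate(splitters):
        if k % 2 == 0:
            regex += re.sub(' +', r'\\s+', part)      # literal text: any run of spaces
        else:
            header = part.strip('<>')
            regex += f'(?P<{header}>.*?)'             # placeholder -> named group
            headers.append(header)
    return headers, re.compile('^' + regex + '$')


headers, rx = logformat_regex_sketch('<Date> <Time> <Content>')
m = rx.search('081109 203615 Received block blk_-160899 of size 91178')
print(headers, m.group('Date'), m.group('Time'))   # ['Date', 'Time', 'Content'] 081109 203615
```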
@commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
'Shows command stats.\n\n Use a negative number for bottom instead of top.\n This is only for the current session.\n '
counter = self.bot.command_stats
width = len(max(counter, key=len))
... | -6,335,433,249,142,624,000 | Shows command stats.
Use a negative number for bottom instead of top.
This is only for the current session. | cogs/stats.py | commandstats | ymypengueni/RoboDanny | python | @commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
'Shows command stats.\n\n Use a negative number for bottom instead of top.\n This is only for the current session.\n '
counter = self.bot.command_stats
width = len(max(counter, key=len))
... |
@commands.command()
async def uptime(self, ctx):
'Tells you how long the bot has been up for.'
(await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')) | 1,641,823,471,392,304,400 | Tells you how long the bot has been up for. | cogs/stats.py | uptime | ymypengueni/RoboDanny | python | @commands.command()
async def uptime(self, ctx):
(await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')) |
@commands.command()
async def about(self, ctx):
'Tells you information about the bot itself.'
revision = self.get_last_commits()
embed = discord.Embed(description=('Latest Changes:\n' + revision))
embed.title = 'Official Bot Server Invite'
embed.url = 'https://discord.gg/DWEaqMy'
embed.colour = ... | -2,914,060,566,776,610,300 | Tells you information about the bot itself. | cogs/stats.py | about | ymypengueni/RoboDanny | python | @commands.command()
async def about(self, ctx):
revision = self.get_last_commits()
embed = discord.Embed(description=('Latest Changes:\n' + revision))
embed.title = 'Official Bot Server Invite'
embed.url = 'https://discord.gg/DWEaqMy'
embed.colour = discord.Colour.blurple()
owner = self.bot... |
@commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member=None):
'Tells you command usage stats for the server or a member.'
async with ctx.typing():
if (member is None):
... | 8,762,986,668,212,130,000 | Tells you command usage stats for the server or a member. | cogs/stats.py | stats | ymypengueni/RoboDanny | python | @commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member=None):
async with ctx.typing():
if (member is None):
(await self.show_guild_stats(ctx))
else:
... |
@stats.command(name='global')
@commands.is_owner()
async def stats_global(self, ctx):
'Global all time command statistics.'
query = 'SELECT COUNT(*) FROM commands;'
total = (await ctx.db.fetchrow(query))
e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
e.description = f'{tot... | -1,137,157,467,025,001,100 | Global all time command statistics. | cogs/stats.py | stats_global | ymypengueni/RoboDanny | python | @stats.command(name='global')
@commands.is_owner()
async def stats_global(self, ctx):
query = 'SELECT COUNT(*) FROM commands;'
total = (await ctx.db.fetchrow(query))
e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
e.description = f'{total[0]} commands used.'
lookup = (... |
@stats.command(name='today')
@commands.is_owner()
async def stats_today(self, ctx):
'Global command statistics for the day.'
query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
total = (await ctx.db.fetch(query))
failed = 0
success = 0... | 4,463,294,225,893,276,000 | Global command statistics for the day. | cogs/stats.py | stats_today | ymypengueni/RoboDanny | python | @stats.command(name='today')
@commands.is_owner()
async def stats_today(self, ctx):
query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
total = (await ctx.db.fetch(query))
failed = 0
success = 0
question = 0
for (state, count)... |
@commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
'Various bot health monitoring tools.'
HEALTHY = discord.Colour(value=4437377)
UNHEALTHY = discord.Colour(value=15747399)
WARNING = discord.Colour(value=15769159)
total_warnings = 0
embed = discord.Embed(title='B... | 2,352,962,447,775,214,600 | Various bot health monitoring tools. | cogs/stats.py | bothealth | ymypengueni/RoboDanny | python | @commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
HEALTHY = discord.Colour(value=4437377)
UNHEALTHY = discord.Colour(value=15747399)
WARNING = discord.Colour(value=15769159)
total_warnings = 0
embed = discord.Embed(title='Bot Health Report', colour=HEALTHY)
... |
@commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
'Debug a task by a memory location.'
task = object_at(memory_id)
if ((task is None) or (not isinstance(task, asyncio.Task))):
return (await ctx.send(f'Could not find Ta... | 6,734,935,313,058,193,000 | Debug a task by a memory location. | cogs/stats.py | debug_task | ymypengueni/RoboDanny | python | @commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
task = object_at(memory_id)
if ((task is None) or (not isinstance(task, asyncio.Task))):
return (await ctx.send(f'Could not find Task object at {hex(memory_id)}.'))
... |
@commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def command_history(self, ctx):
'Command history.'
query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n ... | -2,068,843,010,320,469,000 | Command history. | cogs/stats.py | command_history | ymypengueni/RoboDanny | python | @commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def command_history(self, ctx):
query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "co... |
@command_history.command(name='for')
@commands.is_owner()
async def command_history_for(self, ctx, days: typing.Optional[int]=7, *, command: str):
'Command history for a command.'
query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT guild_id,\n ... | -4,612,241,515,661,905,000 | Command history for a command. | cogs/stats.py | command_history_for | ymypengueni/RoboDanny | python | @command_history.command(name='for')
@commands.is_owner()
async def command_history_for(self, ctx, days: typing.Optional[int]=7, *, command: str):
query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT guild_id,\n SUM(CASE WHEN ... |
@command_history.command(name='guild', aliases=['server'])
@commands.is_owner()
async def command_history_guild(self, ctx, guild_id: int):
'Command history for a guild.'
query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ... | 8,836,294,313,546,763,000 | Command history for a guild. | cogs/stats.py | command_history_guild | ymypengueni/RoboDanny | python | @command_history.command(name='guild', aliases=['server'])
@commands.is_owner()
async def command_history_guild(self, ctx, guild_id: int):
query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n ... |
@command_history.command(name='user', aliases=['member'])
@commands.is_owner()
async def command_history_user(self, ctx, user_id: int):
'Command history for a user.'
query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ... | 6,525,985,542,778,223,000 | Command history for a user. | cogs/stats.py | command_history_user | ymypengueni/RoboDanny | python | @command_history.command(name='user', aliases=['member'])
@commands.is_owner()
async def command_history_user(self, ctx, user_id: int):
query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n ... |
@command_history.command(name='log')
@commands.is_owner()
async def command_history_log(self, ctx, days=7):
'Command history log for the last N days.'
query = 'SELECT command, COUNT(*)\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - $1::interval)\n GROUP... | -7,858,791,333,496,422,000 | Command history log for the last N days. | cogs/stats.py | command_history_log | ymypengueni/RoboDanny | python | @command_history.command(name='log')
@commands.is_owner()
async def command_history_log(self, ctx, days=7):
query = 'SELECT command, COUNT(*)\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - $1::interval)\n GROUP BY command\n ORDER BY 2... |
@command_history.command(name='cog')
@commands.is_owner()
async def command_history_cog(self, ctx, days: typing.Optional[int]=7, *, cog: str=None):
'Command history for a cog or grouped by a cog.'
interval = datetime.timedelta(days=days)
if (cog is not None):
cog = self.bot.get_cog(cog)
if (... | -2,708,958,935,261,370,000 | Command history for a cog or grouped by a cog. | cogs/stats.py | command_history_cog | ymypengueni/RoboDanny | python | @command_history.command(name='cog')
@commands.is_owner()
async def command_history_cog(self, ctx, days: typing.Optional[int]=7, *, cog: str=None):
interval = datetime.timedelta(days=days)
if (cog is not None):
cog = self.bot.get_cog(cog)
if (cog is None):
return (await ctx.send... |
def test_get_points_within_radius_of_cameras():
'Verify that points that fall outside of 10 meter radius of two camera poses.\n\n Cameras are placed at (0,0,0) and (10,0,0).\n '
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = n... | -5,620,679,280,221,742,000 | Verify that points that fall outside of 10 meter radius of two camera poses.
Cameras are placed at (0,0,0) and (10,0,0). | tests/utils/test_geometry_comparisons.py | test_get_points_within_radius_of_cameras | yuancaimaiyi/gtsfm | python | def test_get_points_within_radius_of_cameras():
'Verify that points that fall outside of 10 meter radius of two camera poses.\n\n Cameras are placed at (0,0,0) and (10,0,0).\n '
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = n... |
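The tests above exercise `get_points_within_radius_of_cameras` and its degenerate inputs. gtsfm's real function takes `Pose3` objects; the numpy sketch below works directly on camera centres and shows the core filtering step (its rejection of a non-positive radius is an assumption consistent with, but not confirmed by, the degenerate-input test).

```python
import numpy as np


def points_within_radius_sketch(camera_centers, points_3d, radius):
    """Keep the 3-D points lying within `radius` of at least one camera centre."""
    if radius <= 0:
        raise ValueError('radius must be positive')
    camera_centers = np.asarray(camera_centers, dtype=float)
    points_3d = np.asarray(points_3d, dtype=float)
    if camera_centers.size == 0 or points_3d.size == 0:
        return np.zeros((0, 3))
    diffs = points_3d[:, None, :] - camera_centers[None, :, :]   # (N, M, 3)
    dists = np.linalg.norm(diffs, axis=-1)                       # (N, M)
    return points_3d[(dists < radius).any(axis=1)]


centers = [[0.0, 0, 0], [10.0, 0, 0]]
pts = [[-15.0, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]]
print(points_within_radius_sketch(centers, pts, 10.0))   # keeps [-5, 0, 0] and [15, 0, 0]
```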
def test_get_points_within_radius_of_cameras_negative_radius():
'Catch degenerate input.'
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]])
radius = (- 5... | -7,282,060,333,028,995,000 | Catch degenerate input. | tests/utils/test_geometry_comparisons.py | test_get_points_within_radius_of_cameras_negative_radius | yuancaimaiyi/gtsfm | python | def test_get_points_within_radius_of_cameras_negative_radius():
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]])
radius = (- 5)
nearby_points_3d = ... |
def test_get_points_within_radius_of_cameras_no_points():
'Catch degenerate input.'
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.zeros((0, 3))
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radi... | 1,175,814,159,659,153,700 | Catch degenerate input. | tests/utils/test_geometry_comparisons.py | test_get_points_within_radius_of_cameras_no_points | yuancaimaiyi/gtsfm | python | def test_get_points_within_radius_of_cameras_no_points():
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.zeros((0, 3))
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, p... |
def test_get_points_within_radius_of_cameras_no_poses():
'Catch degenerate input.'
wTi_list = []
points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]])
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, ra... | -8,889,441,781,887,105,000 | Catch degenerate input. | tests/utils/test_geometry_comparisons.py | test_get_points_within_radius_of_cameras_no_poses | yuancaimaiyi/gtsfm | python | def test_get_points_within_radius_of_cameras_no_poses():
wTi_list = []
points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]])
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)
assert (nearby_... |
def test_align_rotations(self):
'Tests the alignment of rotations.'
input_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 10)), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(30), 0)]
ref_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(80), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 40)), 0)]
computed = geo... | -3,114,668,187,556,526,000 | Tests the alignment of rotations. | tests/utils/test_geometry_comparisons.py | test_align_rotations | yuancaimaiyi/gtsfm | python | def test_align_rotations(self):
input_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 10)), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(30), 0)]
ref_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(80), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 40)), 0)]
computed = geometry_comparisons.align_rotations(i... |
def test_align_poses_after_sim3_transform(self):
'Test for alignment of poses after applying a SIM3 transformation.'
translation_shift = np.array([5, 10, (- 5)])
rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
scaling_factor = 0.7
transform = Similarity3(rotation_shift, translation_shift, scaling... | 5,758,185,141,826,801,000 | Test for alignment of poses after applying a SIM3 transformation. | tests/utils/test_geometry_comparisons.py | test_align_poses_after_sim3_transform | yuancaimaiyi/gtsfm | python | def test_align_poses_after_sim3_transform(self):
translation_shift = np.array([5, 10, (- 5)])
rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
scaling_factor = 0.7
transform = Similarity3(rotation_shift, translation_shift, scaling_factor)
ref_list = [transform.transformFrom(x) for x in sample... |
def test_align_poses_on_panorama_after_sim3_transform(self):
'Test for alignment of poses after applying a forward motion transformation.'
translation_shift = np.array([0, 5, 0])
rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
scaling_factor = 1.0
aTi_list = sample_poses.PANORAMA_GLOBAL_POSES
... | -5,600,359,633,403,404,000 | Test for alignment of poses after applying a forward motion transformation. | tests/utils/test_geometry_comparisons.py | test_align_poses_on_panorama_after_sim3_transform | yuancaimaiyi/gtsfm | python | def test_align_poses_on_panorama_after_sim3_transform(self):
translation_shift = np.array([0, 5, 0])
rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
scaling_factor = 1.0
aTi_list = sample_poses.PANORAMA_GLOBAL_POSES
bSa = Similarity3(rotation_shift, translation_shift, scaling_factor)
bTi... |
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))])
def test_compare_rotations_with_all_valid_rot3s_success(self, align_rotations_mocked):
'Tests the comparison results on list of rotat... | -6,894,803,049,350,792,000 | Tests the comparison results on list of rotations. | tests/utils/test_geometry_comparisons.py | test_compare_rotations_with_all_valid_rot3s_success | yuancaimaiyi/gtsfm | python | @patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))])
def test_compare_rotations_with_all_valid_rot3s_success(self, align_rotations_mocked):
aRi_list = [Rot3.RzRyRx(0, np.deg2rad(25)... |
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))])
def test_compare_rotations_with_all_valid_rot3s_failure(self, align_rotations_mocked):
'Tests the comparison results on list of rotat... | 4,278,046,596,358,366,000 | Tests the comparison results on list of rotations. | tests/utils/test_geometry_comparisons.py | test_compare_rotations_with_all_valid_rot3s_failure | yuancaimaiyi/gtsfm | python | @patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))])
def test_compare_rotations_with_all_valid_rot3s_failure(self, align_rotations_mocked):
aRi_list = [Rot3.RzRyRx(0, np.deg2rad(25)... |
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20)))])
def test_compare_rotations_with_nones_at_same_indices(self, align_rotations_mocked):
'Tests the comparison results on list of rotations.'
list1 = [Rot3.RzRyRx(0, np... | 5,023,609,743,041,675,000 | Tests the comparison results on list of rotations. | tests/utils/test_geometry_comparisons.py | test_compare_rotations_with_nones_at_same_indices | yuancaimaiyi/gtsfm | python | @patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20)))])
def test_compare_rotations_with_nones_at_same_indices(self, align_rotations_mocked):
list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20... |
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=None)
def test_compare_rotations_with_nones_at_different_indices(self, aligned_rotations_mocked):
'Tests the comparison results on list of rotations.'
list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), None]
... | 2,320,318,608,087,057,000 | Tests the comparison results on list of rotations. | tests/utils/test_geometry_comparisons.py | test_compare_rotations_with_nones_at_different_indices | yuancaimaiyi/gtsfm | python | @patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=None)
def test_compare_rotations_with_nones_at_different_indices(self, aligned_rotations_mocked):
list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), None]
list2 = [Rot3.RzRyRx(0, np.deg2rad(31), 0), None... |
def test_compute_relative_rotation_angle(self):
'Tests the relative angle between two rotations.'
R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5))
R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5))
computed_deg = geometry_comparisons.compute_relative_rotation_angle(R_1, R_2)
expected_deg =... | 7,784,699,289,404,444,000 | Tests the relative angle between two rotations. | tests/utils/test_geometry_comparisons.py | test_compute_relative_rotation_angle | yuancaimaiyi/gtsfm | python | def test_compute_relative_rotation_angle(self):
R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5))
R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5))
computed_deg = geometry_comparisons.compute_relative_rotation_angle(R_1, R_2)
expected_deg = 45
np.testing.assert_allclose(computed_deg, ... |
def test_compute_relative_unit_translation_angle(self):
'Tests the relative angle between two unit-translations.'
U_1 = Unit3(np.array([1, 0, 0]))
U_2 = Unit3(np.array([0.5, 0.5, 0]))
computed_deg = geometry_comparisons.compute_relative_unit_translation_angle(U_1, U_2)
expected_deg = 45
self.ass... | -7,068,969,421,371,341,000 | Tests the relative angle between two unit-translations. | tests/utils/test_geometry_comparisons.py | test_compute_relative_unit_translation_angle | yuancaimaiyi/gtsfm | python | def test_compute_relative_unit_translation_angle(self):
U_1 = Unit3(np.array([1, 0, 0]))
U_2 = Unit3(np.array([0.5, 0.5, 0]))
computed_deg = geometry_comparisons.compute_relative_unit_translation_angle(U_1, U_2)
expected_deg = 45
self.assertAlmostEqual(computed_deg, expected_deg, places=3) |
def test_align_poses_sim3_ignore_missing(self):
'Consider a simple cases with 4 poses in a line. Suppose SfM only recovers 2 of the 4 poses.'
wT0 = Pose3(Rot3(np.eye(3)), np.zeros(3))
wT1 = Pose3(Rot3(np.eye(3)), np.ones(3))
wT2 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 2))
wT3 = Pose3(Rot3(np.eye(3)),... | 8,555,432,123,078,173,000 | Consider a simple cases with 4 poses in a line. Suppose SfM only recovers 2 of the 4 poses. | tests/utils/test_geometry_comparisons.py | test_align_poses_sim3_ignore_missing | yuancaimaiyi/gtsfm | python | def test_align_poses_sim3_ignore_missing(self):
wT0 = Pose3(Rot3(np.eye(3)), np.zeros(3))
wT1 = Pose3(Rot3(np.eye(3)), np.ones(3))
wT2 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 2))
wT3 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 3))
aTi_list = [wT0, wT1, wT2, wT3]
bTi_list = [None, wT1, None, wT3]
... |
def rsync_snapshots(machine_name, simulation_directory_from='', simulation_directory_to='.', snapshot_indices=snapshot_indices_keep):
"\n Use rsync to copy snapshot file[s].\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to cop... | -6,493,826,717,357,637,000 | Use rsync to copy snapshot file[s].
Parameters
----------
machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'
directory_from : str : directory to copy from
directory_to : str : local directory to put snapshots
snapshot_indices : int or list : index[s] of snapshots to transfer | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | rsync_snapshots | UAPH4582/PH482_582 | python | def rsync_snapshots(machine_name, simulation_directory_from=, simulation_directory_to='.', snapshot_indices=snapshot_indices_keep):
"\n Use rsync to copy snapshot file[s].\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy ... |
def rsync_simulation_files(machine_name, directory_from='/oldscratch/projects/xsede/GalaxiesOnFIRE', directory_to='.'):
"\n Use rsync to copy simulation files.\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy from\n di... | -2,482,469,648,714,984,400 | Use rsync to copy simulation files.
Parameters
----------
machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'
directory_from : str : directory to copy from
directory_to : str : directory to put files | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | rsync_simulation_files | UAPH4582/PH482_582 | python | def rsync_simulation_files(machine_name, directory_from='/oldscratch/projects/xsede/GalaxiesOnFIRE', directory_to='.'):
"\n Use rsync to copy simulation files.\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy from\n di... |
def delete_snapshots(snapshot_directory='output', snapshot_index_limits=[1, 599], delete_halos=False):
'\n Delete all snapshots in given directory within snapshot_index_limits,\n except for those in snapshot_indices_keep list.\n\n Parameters\n ----------\n snapshot_directory : str : directory of snap... | 3,725,941,081,509,534,000 | Delete all snapshots in given directory within snapshot_index_limits,
except for those in snapshot_indices_keep list.
Parameters
----------
snapshot_directory : str : directory of snapshots
snapshot_index_limits : list : min and max snapshot indices to delete
delete_halos : bool : whether to delete halo catalog files ... | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | delete_snapshots | UAPH4582/PH482_582 | python | def delete_snapshots(snapshot_directory='output', snapshot_index_limits=[1, 599], delete_halos=False):
'\n Delete all snapshots in given directory within snapshot_index_limits,\n except for those in snapshot_indices_keep list.\n\n Parameters\n ----------\n snapshot_directory : str : directory of snap... |
def compress_snapshots(self, directory='output', directory_out='', snapshot_index_limits=[0, 600], thread_number=1):
'\n Compress all snapshots in input directory.\n\n Parameters\n ----------\n directory : str : directory of snapshots\n directory_out : str : directory to write com... | -3,041,412,715,459,912,000 | Compress all snapshots in input directory.
Parameters
----------
directory : str : directory of snapshots
directory_out : str : directory to write compressed snapshots
snapshot_index_limits : list : min and max snapshot indices to compress
syncronize : bool : whether to synchronize parallel tasks,
wait for each th... | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | compress_snapshots | UAPH4582/PH482_582 | python | def compress_snapshots(self, directory='output', directory_out=, snapshot_index_limits=[0, 600], thread_number=1):
'\n Compress all snapshots in input directory.\n\n Parameters\n ----------\n directory : str : directory of snapshots\n directory_out : str : directory to write compr... |
def compress_snapshot(self, directory='output', directory_out='', snapshot_index=600, analysis_directory='~/analysis', python_executable='python3'):
'\n Compress single snapshot (which may be multiple files) in input directory.\n\n Parameters\n ----------\n directory : str : directory of... | 4,972,527,731,161,589,000 | Compress single snapshot (which may be multiple files) in input directory.
Parameters
----------
directory : str : directory of snapshot
directory_out : str : directory to write compressed snapshot
snapshot_index : int : index of snapshot
analysis_directory : str : directory of analysis code | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | compress_snapshot | UAPH4582/PH482_582 | python | def compress_snapshot(self, directory='output', directory_out=, snapshot_index=600, analysis_directory='~/analysis', python_executable='python3'):
'\n Compress single snapshot (which may be multiple files) in input directory.\n\n Parameters\n ----------\n directory : str : directory of s... |
def test_compression(self, snapshot_indices='all', simulation_directory='.', snapshot_directory='output', compression_level=0):
'\n Read headers from all snapshot files in simulation_directory to check whether files have\n been compressed.\n '
header_compression_name = 'compression.level'
... | 6,285,998,842,586,060,000 | Read headers from all snapshot files in simulation_directory to check whether files have
been compressed. | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | test_compression | UAPH4582/PH482_582 | python | def test_compression(self, snapshot_indices='all', simulation_directory='.', snapshot_directory='output', compression_level=0):
'\n Read headers from all snapshot files in simulation_directory to check whether files have\n been compressed.\n '
header_compression_name = 'compression.level'
... |
def submit_transfer(self, simulation_path_directory='.', snapshot_directory='output', batch_file_name='globus_batch.txt', machine_name='peloton'):
"\n Submit globus transfer of simulation files.\n Must initiate from Stampede.\n\n Parameters\n ----------\n simulation_path_directory... | 8,423,068,657,635,517,000 | Submit globus transfer of simulation files.
Must initiate from Stampede.
Parameters
----------
simulation_path_directory : str : '.' or full path + directory of simulation
snapshot_directory : str : directory of snapshot files within simulation_directory
batch_file_name : str : name of file to write
machine_name : str... | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | submit_transfer | UAPH4582/PH482_582 | python | def submit_transfer(self, simulation_path_directory='.', snapshot_directory='output', batch_file_name='globus_batch.txt', machine_name='peloton'):
"\n Submit globus transfer of simulation files.\n Must initiate from Stampede.\n\n Parameters\n ----------\n simulation_path_directory... |
def write_batch_file(self, simulation_directory='.', snapshot_directory='output', file_name='globus_batch.txt'):
'\n Write batch file that sets files to transfer via globus.\n\n Parameters\n ----------\n simulation_directory : str : directory of simulation\n snapshot_directory : s... | -8,373,690,995,233,226,000 | Write batch file that sets files to transfer via globus.
Parameters
----------
simulation_directory : str : directory of simulation
snapshot_directory : str : directory of snapshot files within simulation_directory
file_name : str : name of batch file to write | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | write_batch_file | UAPH4582/PH482_582 | python | def write_batch_file(self, simulation_directory='.', snapshot_directory='output', file_name='globus_batch.txt'):
'\n Write batch file that sets files to transfer via globus.\n\n Parameters\n ----------\n simulation_directory : str : directory of simulation\n snapshot_directory : s... |
def __init__(self):
'\n Initialise service and configuration\n '
logger.info('Initialised Backend-Service - Ready for gRPC Calls.') | -7,262,744,825,607,908,000 | Initialise service and configuration | backend-service/backend_service/service.py | __init__ | dgildeh/otel-python-cloud-run | python | def __init__(self):
'\n \n '
logger.info('Initialised Backend-Service - Ready for gRPC Calls.') |
def test_parse_nexus_tree(self):
'parse_nexus_tree returns a dnd string and a translation table list'
(Trans_table, dnd) = parse_nexus_tree(Nexus_tree)
self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
self.assertEqual(Tr... | -4,173,345,485,592,903,700 | parse_nexus_tree returns a dnd string and a translation table list | tests/test_parse/test_nexus.py | test_parse_nexus_tree | tla256/cogent3 | python | def test_parse_nexus_tree(self):
(Trans_table, dnd) = parse_nexus_tree(Nexus_tree)
self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
self.assertEqual(Trans_table['1'], 'outgroup25')
self.assertEqual(Trans_table['2'],... |
def test_parse_nexus_tree_sq(self):
'remove single quotes from tree and translate tables'
(Trans_table, dnd) = parse_nexus_tree(Nexus_tree_3)
self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
self.assertEqual(Trans_table[... | -1,260,149,600,308,407,300 | remove single quotes from tree and translate tables | tests/test_parse/test_nexus.py | test_parse_nexus_tree_sq | tla256/cogent3 | python | def test_parse_nexus_tree_sq(self):
(Trans_table, dnd) = parse_nexus_tree(Nexus_tree_3)
self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
self.assertEqual(Trans_table['1'], 'outgroup25')
self.assertEqual(Trans_table[... |
def test_get_tree_info(self):
'get_tree_info returns the Nexus file section that describes the tree'
result = get_tree_info(Nexus_tree)
self.assertEqual(len(result), 33)
self.assertEqual(result[0], 'Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]')
self.assertEqual(result[31], 'tree P... | 2,344,120,915,064,810,500 | get_tree_info returns the Nexus file section that describes the tree | tests/test_parse/test_nexus.py | test_get_tree_info | tla256/cogent3 | python | def test_get_tree_info(self):
result = get_tree_info(Nexus_tree)
self.assertEqual(len(result), 33)
self.assertEqual(result[0], 'Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]')
self.assertEqual(result[31], 'tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17)... |
def test_split_tree_info(self):
'split_tree_info splits lines into header, Trans_table, and dnd'
tree_info = get_tree_info(Nexus_tree)
(header, trans_table, dnd) = split_tree_info(tree_info)
self.assertEqual(len(header), 9)
self.assertEqual(len(trans_table), 22)
self.assertEqual(len(dnd), 2)
... | -4,046,928,395,436,165,600 | split_tree_info splits lines into header, Trans_table, and dnd | tests/test_parse/test_nexus.py | test_split_tree_info | tla256/cogent3 | python | def test_split_tree_info(self):
tree_info = get_tree_info(Nexus_tree)
(header, trans_table, dnd) = split_tree_info(tree_info)
self.assertEqual(len(header), 9)
self.assertEqual(len(trans_table), 22)
self.assertEqual(len(dnd), 2)
self.assertEqual(header[0], 'Begin trees; [Treefile saved Wedn... |
def test_parse_trans_table(self):
'parse_trans_table returns a dict with the taxa names indexed by number'
tree_info = get_tree_info(Nexus_tree)
(header, trans_table, dnd) = split_tree_info(tree_info)
Trans_table = parse_trans_table(trans_table)
self.assertEqual(len(Trans_table), 21)
self.assert... | -6,303,950,845,564,525,000 | parse_trans_table returns a dict with the taxa names indexed by number | tests/test_parse/test_nexus.py | test_parse_trans_table | tla256/cogent3 | python | def test_parse_trans_table(self):
tree_info = get_tree_info(Nexus_tree)
(header, trans_table, dnd) = split_tree_info(tree_info)
Trans_table = parse_trans_table(trans_table)
self.assertEqual(len(Trans_table), 21)
self.assertEqual(Trans_table['1'], 'outgroup25')
self.assertEqual(Trans_table['... |
def test_parse_dnd(self):
'parse_dnd returns a dict with dnd indexed by tree name'
tree_info = get_tree_info(Nexus_tree)
(header, trans_table, dnd) = split_tree_info(tree_info)
dnd_dict = parse_dnd(dnd)
self.assertEqual(dnd_dict['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),... | 5,552,166,177,345,554,000 | parse_dnd returns a dict with dnd indexed by tree name | tests/test_parse/test_nexus.py | test_parse_dnd | tla256/cogent3 | python | def test_parse_dnd(self):
tree_info = get_tree_info(Nexus_tree)
(header, trans_table, dnd) = split_tree_info(tree_info)
dnd_dict = parse_dnd(dnd)
self.assertEqual(dnd_dict['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);') |
def test_get_BL_table(self):
'get_BL_table returns the section of the log file w/ the BL table'
BL_table = get_BL_table(PAUP_log)
self.assertEqual(len(BL_table), 40)
self.assertEqual(BL_table[0], ' 40 root 0 0 0')
self.assertEqual(BL_table[3... | 4,894,002,121,790,787,000 | get_BL_table returns the section of the log file w/ the BL table | tests/test_parse/test_nexus.py | test_get_BL_table | tla256/cogent3 | python | def test_get_BL_table(self):
BL_table = get_BL_table(PAUP_log)
self.assertEqual(len(BL_table), 40)
self.assertEqual(BL_table[0], ' 40 root 0 0 0')
self.assertEqual(BL_table[39], 'outgroup258 (21)* 40 45 27 ... |
def test_find_fields(self):
'find_fields takes BL table line and returns field names mapped to info'
result = find_fields(line1)
self.assertEqual(result['taxa'], '40')
self.assertEqual(result['bl'], '0')
self.assertEqual(result['parent'], 'root') | -6,646,046,789,345,280,000 | find_fields takes BL table line and returns field names mapped to info | tests/test_parse/test_nexus.py | test_find_fields | tla256/cogent3 | python | def test_find_fields(self):
result = find_fields(line1)
self.assertEqual(result['taxa'], '40')
self.assertEqual(result['bl'], '0')
self.assertEqual(result['parent'], 'root') |
def test_parse_taxa(self):
'parse_taxa should return the taxa # from a taxa_field from find_fields'
result1 = find_fields(line1)
result2 = find_fields(line2)
result3 = find_fields(line3)
result4 = find_fields(line4)
self.assertEqual(parse_taxa(result1['taxa']), '40')
self.assertEqual(parse_t... | -8,151,969,477,806,218,000 | parse_taxa should return the taxa # from a taxa_field from find_fields | tests/test_parse/test_nexus.py | test_parse_taxa | tla256/cogent3 | python | def test_parse_taxa(self):
result1 = find_fields(line1)
result2 = find_fields(line2)
result3 = find_fields(line3)
result4 = find_fields(line4)
self.assertEqual(parse_taxa(result1['taxa']), '40')
self.assertEqual(parse_taxa(result2['taxa']), '1')
self.assertEqual(parse_taxa(result3['taxa... |
def test_parse_PAUP_log(self):
'parse_PAUP_log extracts branch length info from a PAUP log file'
BL_dict = parse_PAUP_log(PAUP_log)
self.assertEqual(len(BL_dict), 40)
self.assertEqual(BL_dict['1'], ('40', 40))
self.assertEqual(BL_dict['40'], ('root', 0))
self.assertEqual(BL_dict['39'], ('40', 57... | -7,684,371,037,230,970,000 | parse_PAUP_log extracts branch length info from a PAUP log file | tests/test_parse/test_nexus.py | test_parse_PAUP_log | tla256/cogent3 | python | def test_parse_PAUP_log(self):
BL_dict = parse_PAUP_log(PAUP_log)
self.assertEqual(len(BL_dict), 40)
self.assertEqual(BL_dict['1'], ('40', 40))
self.assertEqual(BL_dict['40'], ('root', 0))
self.assertEqual(BL_dict['39'], ('40', 57))
self.assertEqual(BL_dict['2'], ('39', 56))
self.assert... |
def test_align_with_comments(self):
'correctly handle an alignment block containing comments'
parser = MinimalNexusAlignParser('data/nexus_comments.nex')
got = {n: s for (n, s) in parser}
expect = {'Ephedra': 'TTAAGCCATGCATGTCTAAGTATGAACTAATTCCAAACGGTGA', 'Gnetum': 'TTAAGCCATGCATGTCTATGTACGAACTAATC-AGAA... | -8,109,012,507,756,125,000 | correctly handle an alignment block containing comments | tests/test_parse/test_nexus.py | test_align_with_comments | tla256/cogent3 | python | def test_align_with_comments(self):
parser = MinimalNexusAlignParser('data/nexus_comments.nex')
got = {n: s for (n, s) in parser}
expect = {'Ephedra': 'TTAAGCCATGCATGTCTAAGTATGAACTAATTCCAAACGGTGA', 'Gnetum': 'TTAAGCCATGCATGTCTATGTACGAACTAATC-AGAACGGTGA', 'Welwitschia': 'TTAAGCCATGCACGTGTAAGTATGAACTAGTC... |
def test_align_with_spaced_seqs(self):
'correctly handle an alignment block with spaces in seqs'
parser = MinimalNexusAlignParser('data/nexus_dna.nex')
seqs = {n: s for (n, s) in parser}
self.assertEqual(len(seqs), 10)
lengths = set((len(seqs[n]) for n in seqs))
self.assertEqual(lengths, {705}) | -6,359,138,814,970,381,000 | correctly handle an alignment block with spaces in seqs | tests/test_parse/test_nexus.py | test_align_with_spaced_seqs | tla256/cogent3 | python | def test_align_with_spaced_seqs(self):
parser = MinimalNexusAlignParser('data/nexus_dna.nex')
seqs = {n: s for (n, s) in parser}
self.assertEqual(len(seqs), 10)
lengths = set((len(seqs[n]) for n in seqs))
self.assertEqual(lengths, {705}) |
def test_align_from_mixed(self):
'correctly handle a file with tree and alignment block'
parser = MinimalNexusAlignParser('data/nexus_mixed.nex')
got = {n: s for (n, s) in parser}
expect = {'fish': 'ACATAGAGGGTACCTCTAAG', 'frog': 'ACATAGAGGGTACCTCTAAG', 'snake': 'ACATAGAGGGTACCTCTAAG', 'mouse': 'ACATAGA... | 4,272,722,143,379,951,000 | correctly handle a file with tree and alignment block | tests/test_parse/test_nexus.py | test_align_from_mixed | tla256/cogent3 | python | def test_align_from_mixed(self):
parser = MinimalNexusAlignParser('data/nexus_mixed.nex')
got = {n: s for (n, s) in parser}
expect = {'fish': 'ACATAGAGGGTACCTCTAAG', 'frog': 'ACATAGAGGGTACCTCTAAG', 'snake': 'ACATAGAGGGTACCTCTAAG', 'mouse': 'ACATAGAGGGTACCTCTAAG'}
self.assertEqual(got, expect) |