repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
ecell/ecell4
ecell4/extra/azure_batch.py
create_job
def create_job(batch_service_client, job_id, pool_id):
    """Create a Batch job with the given ID, bound to the given pool.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID for the job.
    :param str pool_id: The ID for the pool.
    """
    print('Creating job [{}]...'.format(job_id))
    pool_info = batch.models.PoolInformation(pool_id=pool_id)
    job_spec = batch.models.JobAddParameter(id=job_id, pool_info=pool_info)
    try:
        batch_service_client.job.add(job_spec)
    except batchmodels.batch_error.BatchErrorException as err:
        # Surface the Batch-specific error details before re-raising.
        print_batch_exception(err)
        raise
python
def create_job(batch_service_client, job_id, pool_id):
    """Create a Batch job with the given ID, bound to the given pool.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID for the job.
    :param str pool_id: The ID for the pool.
    """
    print('Creating job [{}]...'.format(job_id))
    pool_info = batch.models.PoolInformation(pool_id=pool_id)
    job_spec = batch.models.JobAddParameter(id=job_id, pool_info=pool_info)
    try:
        batch_service_client.job.add(job_spec)
    except batchmodels.batch_error.BatchErrorException as err:
        # Surface the Batch-specific error details before re-raising.
        print_batch_exception(err)
        raise
[ "def", "create_job", "(", "batch_service_client", ",", "job_id", ",", "pool_id", ")", ":", "print", "(", "'Creating job [{}]...'", ".", "format", "(", "job_id", ")", ")", "job", "=", "batch", ".", "models", ".", "JobAddParameter", "(", "id", "=", "job_id", ...
Creates a job with the specified ID, associated with the specified pool. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID for the job. :param str pool_id: The ID for the pool.
[ "Creates", "a", "job", "with", "the", "specified", "ID", "associated", "with", "the", "specified", "pool", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L234-L252
train
ecell/ecell4
ecell4/extra/azure_batch.py
add_tasks
def add_tasks(batch_service_client, job_id, loads, output_container_name,
              output_container_sas_token, task_file, acount_name):
    """Add one task to the specified job for each entry in *loads*.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job to which to add the tasks.
    :param list loads: Tuples of ``(input_file, output_file, i, j)``;
        one task is created per tuple.
    :param output_container_name: The ID of an Azure Blob storage container
        to which the tasks will upload their results.
    :param output_container_sas_token: A SAS token granting write access to
        the specified Azure Blob storage container.
    :param str task_file: A file name of the script each task runs.
    :param str acount_name: A storage account name.
    :return: The IDs of the tasks that were added.
    """
    _log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id))
    tasks = []
    for (input_file, output_file, i, j) in loads:
        # Build the shell command each compute node executes for this task.
        cmd = ('python $AZ_BATCH_NODE_SHARED_DIR/{} '
               '--filepath {} --output {} --storageaccount {} '
               '--task_id {} --job_id {} '
               '--storagecontainer {} --sastoken "{}"'.format(
                   os.path.basename(task_file), input_file.file_path,
                   output_file, acount_name, i, j,
                   output_container_name, output_container_sas_token))
        _log.debug('CMD : "{}"'.format(cmd))
        tasks.append(batch.models.TaskAddParameter(
            id='topNtask{}-{}'.format(i, j),
            command_line=[cmd],
            resource_files=[input_file]))
    batch_service_client.task.add_collection(job_id, tasks)
    task_ids = [task.id for task in tasks]
    _log.info('{} tasks were added.'.format(len(task_ids)))
    return task_ids
python
def add_tasks(batch_service_client, job_id, loads, output_container_name,
              output_container_sas_token, task_file, acount_name):
    """Add one task to the specified job for each entry in *loads*.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job to which to add the tasks.
    :param list loads: Tuples of ``(input_file, output_file, i, j)``;
        one task is created per tuple.
    :param output_container_name: The ID of an Azure Blob storage container
        to which the tasks will upload their results.
    :param output_container_sas_token: A SAS token granting write access to
        the specified Azure Blob storage container.
    :param str task_file: A file name of the script each task runs.
    :param str acount_name: A storage account name.
    :return: The IDs of the tasks that were added.
    """
    _log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id))
    tasks = []
    for (input_file, output_file, i, j) in loads:
        # Build the shell command each compute node executes for this task.
        cmd = ('python $AZ_BATCH_NODE_SHARED_DIR/{} '
               '--filepath {} --output {} --storageaccount {} '
               '--task_id {} --job_id {} '
               '--storagecontainer {} --sastoken "{}"'.format(
                   os.path.basename(task_file), input_file.file_path,
                   output_file, acount_name, i, j,
                   output_container_name, output_container_sas_token))
        _log.debug('CMD : "{}"'.format(cmd))
        tasks.append(batch.models.TaskAddParameter(
            id='topNtask{}-{}'.format(i, j),
            command_line=[cmd],
            resource_files=[input_file]))
    batch_service_client.task.add_collection(job_id, tasks)
    task_ids = [task.id for task in tasks]
    _log.info('{} tasks were added.'.format(len(task_ids)))
    return task_ids
[ "def", "add_tasks", "(", "batch_service_client", ",", "job_id", ",", "loads", ",", "output_container_name", ",", "output_container_sas_token", ",", "task_file", ",", "acount_name", ")", ":", "_log", ".", "info", "(", "'Adding {} tasks to job [{}]...'", ".", "format", ...
Adds a task for each input file in the collection to the specified job. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID of the job to which to add the tasks. :param list input_files: A collection of input files. One task will be created for each input file. :param output_container_name: The ID of an Azure Blob storage container to which the tasks will upload their results. :param output_container_sas_token: A SAS token granting write access to the specified Azure Blob storage container. :param str task_file: A file name of the script :param str account_name: A storage account
[ "Adds", "a", "task", "for", "each", "input", "file", "in", "the", "collection", "to", "the", "specified", "job", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L254-L302
train
ecell/ecell4
ecell4/extra/azure_batch.py
wait_for_tasks_to_complete
def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout):
    """Return when all tasks in the specified jobs reach the Completed state.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param job_ids: Pairs whose first element is the id of a job whose
        tasks should be monitored (the second element is ignored here).
    :param timedelta timeout: The duration to wait for task completion. If all
        tasks in the specified jobs do not reach Completed state within this
        time period, an exception will be raised.
    :return: True once every task has completed.
    :raises RuntimeError: If the timeout expires first.
    """
    timeout_expiration = datetime.datetime.now() + timeout

    print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='')

    while datetime.datetime.now() < timeout_expiration:
        print('.', end='')
        sys.stdout.flush()

        # Fix: initialize here so an empty job_ids cannot leave the name
        # unbound (previously a NameError) and trivially counts as complete.
        incomplete_tasks = []
        for (job_id, _) in job_ids:
            tasks = batch_service_client.task.list(job_id)
            incomplete_tasks = [task for task in tasks if
                                task.state != batchmodels.TaskState.completed]
            if incomplete_tasks:
                # At least one job is unfinished; no need to scan the rest.
                break

        if not incomplete_tasks:
            print()
            return True
        else:
            time.sleep(1)

    raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
                       "timeout period of " + str(timeout))
python
def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout):
    """Return when all tasks in the specified jobs reach the Completed state.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param job_ids: Pairs whose first element is the id of a job whose
        tasks should be monitored (the second element is ignored here).
    :param timedelta timeout: The duration to wait for task completion. If all
        tasks in the specified jobs do not reach Completed state within this
        time period, an exception will be raised.
    :return: True once every task has completed.
    :raises RuntimeError: If the timeout expires first.
    """
    timeout_expiration = datetime.datetime.now() + timeout

    print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='')

    while datetime.datetime.now() < timeout_expiration:
        print('.', end='')
        sys.stdout.flush()

        # Fix: initialize here so an empty job_ids cannot leave the name
        # unbound (previously a NameError) and trivially counts as complete.
        incomplete_tasks = []
        for (job_id, _) in job_ids:
            tasks = batch_service_client.task.list(job_id)
            incomplete_tasks = [task for task in tasks if
                                task.state != batchmodels.TaskState.completed]
            if incomplete_tasks:
                # At least one job is unfinished; no need to scan the rest.
                break

        if not incomplete_tasks:
            print()
            return True
        else:
            time.sleep(1)

    raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
                       "timeout period of " + str(timeout))
[ "def", "wait_for_tasks_to_complete", "(", "batch_service_client", ",", "job_ids", ",", "timeout", ")", ":", "timeout_expiration", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "timeout", "print", "(", "\"Monitoring all tasks for 'Completed' state, timeout...
Returns when all tasks in the specified job reach the Completed state. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The id of the job whose tasks should be monitored. :param timedelta timeout: The duration to wait for task completion. If all tasks in the specified job do not reach Completed state within this time period, an exception will be raised.
[ "Returns", "when", "all", "tasks", "in", "the", "specified", "job", "reach", "the", "Completed", "state", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L304-L337
train
ecell/ecell4
ecell4/extra/azure_batch.py
download_blobs_from_container
def download_blobs_from_container(block_blob_client, container_name,
                                  directory_path, prefix=None):
    """Download all blobs from the specified Azure Blob storage container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param container_name: The Azure Blob storage container from which to
        download files.
    :param directory_path: The local directory to which to download the files.
    :param str prefix: A name prefix to filter blobs. None as its default.
    """
    _log.info('Downloading all files from container [{}]...'.format(container_name))

    # Fix: forward the caller-supplied prefix (it was hard-coded to None,
    # silently ignoring the parameter).
    container_blobs = block_blob_client.list_blobs(container_name, prefix=prefix)
    # Materialize the listing once so logging does not exhaust the paged
    # iterator before the download loop runs.
    blobs = list(container_blobs.items)
    _log.info('{} blobs are found [{}]'.format(
        len(blobs), ', '.join(blob.name for blob in blobs)))

    for blob in blobs:
        destination_file_path = os.path.join(directory_path, blob.name)
        block_blob_client.get_blob_to_path(container_name, blob.name,
                                           destination_file_path)
        _log.info(' Downloaded blob [{}] from container [{}] to {}'.format(
            blob.name, container_name, destination_file_path))

    _log.info(' Download complete!')
python
def download_blobs_from_container(block_blob_client, container_name,
                                  directory_path, prefix=None):
    """Download all blobs from the specified Azure Blob storage container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param container_name: The Azure Blob storage container from which to
        download files.
    :param directory_path: The local directory to which to download the files.
    :param str prefix: A name prefix to filter blobs. None as its default.
    """
    _log.info('Downloading all files from container [{}]...'.format(container_name))

    # Fix: forward the caller-supplied prefix (it was hard-coded to None,
    # silently ignoring the parameter).
    container_blobs = block_blob_client.list_blobs(container_name, prefix=prefix)
    # Materialize the listing once so logging does not exhaust the paged
    # iterator before the download loop runs.
    blobs = list(container_blobs.items)
    _log.info('{} blobs are found [{}]'.format(
        len(blobs), ', '.join(blob.name for blob in blobs)))

    for blob in blobs:
        destination_file_path = os.path.join(directory_path, blob.name)
        block_blob_client.get_blob_to_path(container_name, blob.name,
                                           destination_file_path)
        _log.info(' Downloaded blob [{}] from container [{}] to {}'.format(
            blob.name, container_name, destination_file_path))

    _log.info(' Download complete!')
[ "def", "download_blobs_from_container", "(", "block_blob_client", ",", "container_name", ",", "directory_path", ",", "prefix", "=", "None", ")", ":", "_log", ".", "info", "(", "'Downloading all files from container [{}]...'", ".", "format", "(", "container_name", ")", ...
Downloads all blobs from the specified Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param container_name: The Azure Blob storage container from which to download files. :param directory_path: The local directory to which to download the files. :param str prefix: A name prefix to filter blobs. None as its default
[ "Downloads", "all", "blobs", "from", "the", "specified", "Azure", "Blob", "storage", "container", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L339-L368
train
ecell/ecell4
ecell4/extra/azure_batch.py
singlerun
def singlerun(job, task_id=0, job_id=0):
    """Run one example Gillespie simulation for the given parameter set."""
    import ecell4_base
    import ecell4
    import ecell4.util.simulation
    import ecell4.util.decorator

    print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__))
    print('ecell4.__version__ = {:s}'.format(ecell4.__version__))
    print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id))

    # Declare the reversible reaction A + B <=> C with rates (0.01, 0.3);
    # A, B and C are resolved by the reaction_rules decorator context.
    with ecell4.util.decorator.reaction_rules():
        A + B == C | (0.01, 0.3)

    initial_counts = {'A': job[0], 'B': job[1], 'C': job[2]}
    result = ecell4.util.simulation.run_simulation(
        1.0, y0=initial_counts, rndseed=job_id,
        solver='gillespie', return_type='array')
    print('A simulation was successfully done.')
    return result
python
def singlerun(job, task_id=0, job_id=0):
    """Run one example Gillespie simulation for the given parameter set."""
    import ecell4_base
    import ecell4
    import ecell4.util.simulation
    import ecell4.util.decorator

    print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__))
    print('ecell4.__version__ = {:s}'.format(ecell4.__version__))
    print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id))

    # Declare the reversible reaction A + B <=> C with rates (0.01, 0.3);
    # A, B and C are resolved by the reaction_rules decorator context.
    with ecell4.util.decorator.reaction_rules():
        A + B == C | (0.01, 0.3)

    initial_counts = {'A': job[0], 'B': job[1], 'C': job[2]}
    result = ecell4.util.simulation.run_simulation(
        1.0, y0=initial_counts, rndseed=job_id,
        solver='gillespie', return_type='array')
    print('A simulation was successfully done.')
    return result
[ "def", "singlerun", "(", "job", ",", "task_id", "=", "0", ",", "job_id", "=", "0", ")", ":", "import", "ecell4_base", "import", "ecell4", "import", "ecell4", ".", "util", ".", "simulation", "import", "ecell4", ".", "util", ".", "decorator", "print", "(",...
This task is for an example.
[ "This", "task", "is", "for", "an", "example", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L757-L779
train
ecell/ecell4
ecell4/util/viz.py
plot_number_observer
def plot_number_observer(*args, **kwargs):
    """
    Generate a plot from NumberObservers and show it.
    See plot_number_observer_with_matplotlib and _with_nya for details.

    Parameters
    ----------
    obs : NumberObserver (e.g. FixedIntervalNumberObserver)
    interactive : bool, default False
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with nyaplot.

    Examples
    --------
    >>> plot_number_observer(obs1)
    >>> plot_number_observer(obs1, interactive=True)

    """
    if kwargs.pop('interactive', False):
        plot_number_observer_with_nya(*args, **kwargs)
        return
    # 'to_png' exists only on the nyaplot backend; strip it here so the
    # accepted kwargs stay consistent between the two visualizers.
    if kwargs.pop('to_png', None) is not None:
        import warnings
        warnings.warn(
            "An option 'to_png' is not available with matplotlib. Just ignored.")
    plot_number_observer_with_matplotlib(*args, **kwargs)
python
def plot_number_observer(*args, **kwargs):
    """
    Generate a plot from NumberObservers and show it.
    See plot_number_observer_with_matplotlib and _with_nya for details.

    Parameters
    ----------
    obs : NumberObserver (e.g. FixedIntervalNumberObserver)
    interactive : bool, default False
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with nyaplot.

    Examples
    --------
    >>> plot_number_observer(obs1)
    >>> plot_number_observer(obs1, interactive=True)

    """
    if kwargs.pop('interactive', False):
        plot_number_observer_with_nya(*args, **kwargs)
        return
    # 'to_png' exists only on the nyaplot backend; strip it here so the
    # accepted kwargs stay consistent between the two visualizers.
    if kwargs.pop('to_png', None) is not None:
        import warnings
        warnings.warn(
            "An option 'to_png' is not available with matplotlib. Just ignored.")
    plot_number_observer_with_matplotlib(*args, **kwargs)
[ "def", "plot_number_observer", "(", "*", "args", ",", "**", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "False", ")", "if", "interactive", ":", "plot_number_observer_with_nya", "(", "*", "args", ",", "**", "kwargs...
Generate a plot from NumberObservers and show it. See plot_number_observer_with_matplotlib and _with_nya for details. Parameters ---------- obs : NumberObserver (e.g. FixedIntervalNumberObserver) interactive : bool, default False Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with nyaplot. Examples -------- >>> plot_number_observer(obs1) >>> plot_number_observer(obs1, interactive=True)
[ "Generate", "a", "plot", "from", "NumberObservers", "and", "show", "it", ".", "See", "plot_number_observer_with_matplotlib", "and", "_with_nya", "for", "details", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L28-L58
train
ecell/ecell4
ecell4/util/viz.py
plot_world
def plot_world(*args, **kwargs):
    """
    Generate a plot from received instance of World and show it.
    See also plot_world_with_elegans and plot_world_with_matplotlib.

    Parameters
    ----------
    world : World or str
        World or a HDF5 filename to render.
    interactive : bool, default True
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    Examples
    --------
    >>> plot_world(w)
    >>> plot_world(w, interactive=False)

    """
    # Dispatch to the requested backend; elegans is the notebook default.
    backend = (plot_world_with_elegans
               if kwargs.pop('interactive', True)
               else plot_world_with_matplotlib)
    backend(*args, **kwargs)
python
def plot_world(*args, **kwargs):
    """
    Generate a plot from received instance of World and show it.
    See also plot_world_with_elegans and plot_world_with_matplotlib.

    Parameters
    ----------
    world : World or str
        World or a HDF5 filename to render.
    interactive : bool, default True
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    Examples
    --------
    >>> plot_world(w)
    >>> plot_world(w, interactive=False)

    """
    # Dispatch to the requested backend; elegans is the notebook default.
    backend = (plot_world_with_elegans
               if kwargs.pop('interactive', True)
               else plot_world_with_matplotlib)
    backend(*args, **kwargs)
[ "def", "plot_world", "(", "*", "args", ",", "**", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "True", ")", "if", "interactive", ":", "plot_world_with_elegans", "(", "*", "args", ",", "**", "kwargs", ")", "else...
Generate a plot from received instance of World and show it. See also plot_world_with_elegans and plot_world_with_matplotlib. Parameters ---------- world : World or str World or a HDF5 filename to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_world(w) >>> plot_world(w, interactive=False)
[ "Generate", "a", "plot", "from", "received", "instance", "of", "World", "and", "show", "it", ".", "See", "also", "plot_world_with_elegans", "and", "plot_world_with_matplotlib", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L60-L83
train
ecell/ecell4
ecell4/util/viz.py
plot_movie
def plot_movie(*args, **kwargs):
    """
    Generate a movie from received instances of World and show them.
    See also plot_movie_with_elegans and plot_movie_with_matplotlib.

    Parameters
    ----------
    worlds : list of World
        Worlds to render.
    interactive : bool, default False
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    """
    # Dispatch to the requested backend; matplotlib is the default here.
    backend = (plot_movie_with_elegans
               if kwargs.pop('interactive', False)
               else plot_movie_with_matplotlib)
    backend(*args, **kwargs)
python
def plot_movie(*args, **kwargs):
    """
    Generate a movie from received instances of World and show them.
    See also plot_movie_with_elegans and plot_movie_with_matplotlib.

    Parameters
    ----------
    worlds : list of World
        Worlds to render.
    interactive : bool, default False
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    """
    # Dispatch to the requested backend; matplotlib is the default here.
    backend = (plot_movie_with_elegans
               if kwargs.pop('interactive', False)
               else plot_movie_with_matplotlib)
    backend(*args, **kwargs)
[ "def", "plot_movie", "(", "*", "args", ",", "**", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "False", ")", "if", "interactive", ":", "plot_movie_with_elegans", "(", "*", "args", ",", "**", "kwargs", ")", "els...
Generate a movie from received instances of World and show them. See also plot_movie_with_elegans and plot_movie_with_matplotlib. Parameters ---------- worlds : list of World Worlds to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans.
[ "Generate", "a", "movie", "from", "received", "instances", "of", "World", "and", "show", "them", ".", "See", "also", "plot_movie_with_elegans", "and", "plot_movie_with_matplotlib", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L85-L103
train
ecell/ecell4
ecell4/util/viz.py
plot_trajectory
def plot_trajectory(*args, **kwargs):
    """
    Generate a plot from received instance of TrajectoryObserver and show it.
    See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib.

    Parameters
    ----------
    obs : TrajectoryObserver
        TrajectoryObserver to render.
    interactive : bool, default True
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    Examples
    --------
    >>> plot_trajectory(obs)
    >>> plot_trajectory(obs, interactive=False)

    """
    # Dispatch to the requested backend; elegans is the notebook default.
    backend = (plot_trajectory_with_elegans
               if kwargs.pop('interactive', True)
               else plot_trajectory_with_matplotlib)
    backend(*args, **kwargs)
python
def plot_trajectory(*args, **kwargs):
    """
    Generate a plot from received instance of TrajectoryObserver and show it.
    See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib.

    Parameters
    ----------
    obs : TrajectoryObserver
        TrajectoryObserver to render.
    interactive : bool, default True
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    Examples
    --------
    >>> plot_trajectory(obs)
    >>> plot_trajectory(obs, interactive=False)

    """
    # Dispatch to the requested backend; elegans is the notebook default.
    backend = (plot_trajectory_with_elegans
               if kwargs.pop('interactive', True)
               else plot_trajectory_with_matplotlib)
    backend(*args, **kwargs)
[ "def", "plot_trajectory", "(", "*", "args", ",", "**", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "True", ")", "if", "interactive", ":", "plot_trajectory_with_elegans", "(", "*", "args", ",", "**", "kwargs", ")...
Generate a plot from received instance of TrajectoryObserver and show it See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_trajectory(obs) >>> plot_trajectory(obs, interactive=False)
[ "Generate", "a", "plot", "from", "received", "instance", "of", "TrajectoryObserver", "and", "show", "it", "See", "also", "plot_trajectory_with_elegans", "and", "plot_trajectory_with_matplotlib", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L105-L128
train
ecell/ecell4
ecell4/util/viz.py
plot_movie_with_elegans
def plot_movie_with_elegans(
        worlds, radius=None, width=500, height=500, config=None, grid=False,
        species_list=None):
    """
    Generate a movie from received instances of World and show them
    on IPython notebook.

    Parameters
    ----------
    worlds : list of World
        Worlds to render.
    radius : float, default None
        If this value is set, all particles in the world will be rendered
        as if their radius are the same.
    width : float, default 500
        Width of the plotting area.
    height : float, default 500
        Height of the plotting area.
    config : dict, default {}
        Dict for configure default colors. Its values are colors unique
        to each species. The dictionary will be updated during this plot.
        Colors included in config dict will never be used for other species.
    species_list : array of string, default None
        If set, plot_movie will not search the list of species.

    """
    config = config or {}

    from IPython.core.display import display, HTML
    from jinja2 import Template  # NOTE(review): unused; kept so a missing jinja2 still fails here

    # Group per-species particle frames over time: name -> [{'df', 't'}, ...]
    data = {}
    sizes = {}
    for frame_index, world in enumerate(worlds):
        for species_info in __parse_world(world, radius, species_list):
            name = species_info['name']
            if data.get(name) is None:
                data[name] = []
            data[name].append({
                'df': species_info['data'],
                't': frame_index
            })
            sizes[name] = species_info['size']

    options = {
        'player': True,
        'autorange': False,
        'space_mode': 'wireframe',
        'grid': grid,
        'range': __get_range_of_world(worlds[0])
    }

    model_id = '"movie' + str(uuid.uuid4()) + '"'
    color_scale = default_color_scale(config=config)

    display(HTML(generate_html({
        'model_id': model_id,
        'names': json.dumps(list(data.keys())),
        'data': json.dumps(list(data.values())),
        'colors': json.dumps([color_scale.get_color(name)
                              for name in data.keys()]),
        'sizes': json.dumps([sizes[name] for name in data.keys()]),
        'options': json.dumps(options)
    }, 'templates/movie.tmpl')))
python
def plot_movie_with_elegans(
        worlds, radius=None, width=500, height=500, config=None, grid=False,
        species_list=None):
    """
    Generate a movie from received instances of World and show them
    on IPython notebook.

    Parameters
    ----------
    worlds : list of World
        Worlds to render.
    radius : float, default None
        If this value is set, all particles in the world will be rendered
        as if their radius are the same.
    width : float, default 500
        Width of the plotting area.
    height : float, default 500
        Height of the plotting area.
    config : dict, default {}
        Dict for configure default colors. Its values are colors unique
        to each species. The dictionary will be updated during this plot.
        Colors included in config dict will never be used for other species.
    species_list : array of string, default None
        If set, plot_movie will not search the list of species.

    """
    config = config or {}

    from IPython.core.display import display, HTML
    from jinja2 import Template  # NOTE(review): unused; kept so a missing jinja2 still fails here

    # Group per-species particle frames over time: name -> [{'df', 't'}, ...]
    data = {}
    sizes = {}
    for frame_index, world in enumerate(worlds):
        for species_info in __parse_world(world, radius, species_list):
            name = species_info['name']
            if data.get(name) is None:
                data[name] = []
            data[name].append({
                'df': species_info['data'],
                't': frame_index
            })
            sizes[name] = species_info['size']

    options = {
        'player': True,
        'autorange': False,
        'space_mode': 'wireframe',
        'grid': grid,
        'range': __get_range_of_world(worlds[0])
    }

    model_id = '"movie' + str(uuid.uuid4()) + '"'
    color_scale = default_color_scale(config=config)

    display(HTML(generate_html({
        'model_id': model_id,
        'names': json.dumps(list(data.keys())),
        'data': json.dumps(list(data.values())),
        'colors': json.dumps([color_scale.get_color(name)
                              for name in data.keys()]),
        'sizes': json.dumps([sizes[name] for name in data.keys()]),
        'options': json.dumps(options)
    }, 'templates/movie.tmpl')))
[ "def", "plot_movie_with_elegans", "(", "worlds", ",", "radius", "=", "None", ",", "width", "=", "500", ",", "height", "=", "500", ",", "config", "=", "None", ",", "grid", "=", "False", ",", "species_list", "=", "None", ")", ":", "config", "=", "config"...
Generate a movie from received instances of World and show them on IPython notebook. Parameters ---------- worlds : list of World Worlds to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 500 Width of the plotting area. height : float, default 500 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_movie will not search the list of species
[ "Generate", "a", "movie", "from", "received", "instances", "of", "World", "and", "show", "them", "on", "IPython", "notebook", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L476-L539
train
ecell/ecell4
ecell4/util/viz.py
plot_world_with_elegans
def plot_world_with_elegans( world, radius=None, width=350, height=350, config=None, grid=True, wireframe=False, species_list=None, debug=None, max_count=1000, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6), return_id=False, predicator=None): """ Generate a plot from received instance of World and show it on IPython notebook. This method returns the instance of dict that indicates color setting for each speices. You can use the dict as the parameter of plot_world, in order to use the same colors in another plot. Parameters ---------- world : World or str World or a HDF5 filename to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 350 Width of the plotting area. height : float, default 350 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. debug : array of dict, default [] *** EXPERIMENTAL IMPRIMENTATION *** Example: >> [{'type': 'box', 'x': 10, 'y': 10, 'z': 10, 'options': {'width': 1, 'height': 1}}] type: 'box', 'plane', 'sphere', and 'cylinder' x, y, z: float options: box: width, height, depth plane: width, height sphere: radius cylinder: radius, height camera_position : tuple, default (-22, 23, 32) camera_rotaiton : tuple, default (-0.6, 0.5, 0.6) Initial position and rotation of camera. return_id : bool, default False If True, return a model id, which is required for `to_png` function. 
""" config = config or {} from IPython.core.display import display, HTML from .simulation import load_world if isinstance(world, str): world = load_world(world) species = __parse_world(world, radius, species_list, max_count, predicator) color_scale = default_color_scale(config=config) plots = [] for species_info in species: plots.append({ 'type': 'Particles', 'data': species_info['data'], 'options': { 'name': species_info['name'], 'color': color_scale.get_color(species_info['name']), 'size': species_info['size'] } }) if debug is not None: data = {'type': [], 'x': [], 'y': [], 'z': [], 'options': []} for obj in debug: for k, v in obj.items(): data[k].append(v) plots.append({ 'type': 'DebugObject', 'data': data, 'options': {} }) model = { 'plots': plots, 'options': { 'world_width': width, 'world_height': height, 'range': __get_range_of_world(world), 'autorange': False, 'grid': grid, 'save_image': True # 'save_image': False } } if wireframe: model['options']['space_mode'] = 'wireframe' model_id = '"viz' + str(uuid.uuid4()) + '"' display(HTML(generate_html( {'model': json.dumps(model), 'model_id': model_id, 'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2], 'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]}, 'templates/particles.tmpl'))) if return_id: return model_id
python
def plot_world_with_elegans( world, radius=None, width=350, height=350, config=None, grid=True, wireframe=False, species_list=None, debug=None, max_count=1000, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6), return_id=False, predicator=None): """ Generate a plot from received instance of World and show it on IPython notebook. This method returns the instance of dict that indicates color setting for each speices. You can use the dict as the parameter of plot_world, in order to use the same colors in another plot. Parameters ---------- world : World or str World or a HDF5 filename to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 350 Width of the plotting area. height : float, default 350 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. debug : array of dict, default [] *** EXPERIMENTAL IMPRIMENTATION *** Example: >> [{'type': 'box', 'x': 10, 'y': 10, 'z': 10, 'options': {'width': 1, 'height': 1}}] type: 'box', 'plane', 'sphere', and 'cylinder' x, y, z: float options: box: width, height, depth plane: width, height sphere: radius cylinder: radius, height camera_position : tuple, default (-22, 23, 32) camera_rotaiton : tuple, default (-0.6, 0.5, 0.6) Initial position and rotation of camera. return_id : bool, default False If True, return a model id, which is required for `to_png` function. 
""" config = config or {} from IPython.core.display import display, HTML from .simulation import load_world if isinstance(world, str): world = load_world(world) species = __parse_world(world, radius, species_list, max_count, predicator) color_scale = default_color_scale(config=config) plots = [] for species_info in species: plots.append({ 'type': 'Particles', 'data': species_info['data'], 'options': { 'name': species_info['name'], 'color': color_scale.get_color(species_info['name']), 'size': species_info['size'] } }) if debug is not None: data = {'type': [], 'x': [], 'y': [], 'z': [], 'options': []} for obj in debug: for k, v in obj.items(): data[k].append(v) plots.append({ 'type': 'DebugObject', 'data': data, 'options': {} }) model = { 'plots': plots, 'options': { 'world_width': width, 'world_height': height, 'range': __get_range_of_world(world), 'autorange': False, 'grid': grid, 'save_image': True # 'save_image': False } } if wireframe: model['options']['space_mode'] = 'wireframe' model_id = '"viz' + str(uuid.uuid4()) + '"' display(HTML(generate_html( {'model': json.dumps(model), 'model_id': model_id, 'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2], 'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]}, 'templates/particles.tmpl'))) if return_id: return model_id
[ "def", "plot_world_with_elegans", "(", "world", ",", "radius", "=", "None", ",", "width", "=", "350", ",", "height", "=", "350", ",", "config", "=", "None", ",", "grid", "=", "True", ",", "wireframe", "=", "False", ",", "species_list", "=", "None", ","...
Generate a plot from received instance of World and show it on IPython notebook. This method returns the instance of dict that indicates color setting for each speices. You can use the dict as the parameter of plot_world, in order to use the same colors in another plot. Parameters ---------- world : World or str World or a HDF5 filename to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 350 Width of the plotting area. height : float, default 350 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. debug : array of dict, default [] *** EXPERIMENTAL IMPRIMENTATION *** Example: >> [{'type': 'box', 'x': 10, 'y': 10, 'z': 10, 'options': {'width': 1, 'height': 1}}] type: 'box', 'plane', 'sphere', and 'cylinder' x, y, z: float options: box: width, height, depth plane: width, height sphere: radius cylinder: radius, height camera_position : tuple, default (-22, 23, 32) camera_rotaiton : tuple, default (-0.6, 0.5, 0.6) Initial position and rotation of camera. return_id : bool, default False If True, return a model id, which is required for `to_png` function.
[ "Generate", "a", "plot", "from", "received", "instance", "of", "World", "and", "show", "it", "on", "IPython", "notebook", ".", "This", "method", "returns", "the", "instance", "of", "dict", "that", "indicates", "color", "setting", "for", "each", "speices", "....
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L541-L648
train
ecell/ecell4
ecell4/util/viz.py
generate_html
def generate_html(keywords, tmpl_path, package_name='ecell4.util'): """ Generate static html file from JSON model and its own id. Parameters ---------- model : dict JSON model from which ecell4.viz generates a plot. model_id : string Unique id for the plot. Returns ------- html : A HTML object """ from jinja2 import Template import pkgutil template = Template(pkgutil.get_data(package_name, tmpl_path).decode()) # path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path # template = Template(open(path).read()) html = template.render(**keywords) return html
python
def generate_html(keywords, tmpl_path, package_name='ecell4.util'): """ Generate static html file from JSON model and its own id. Parameters ---------- model : dict JSON model from which ecell4.viz generates a plot. model_id : string Unique id for the plot. Returns ------- html : A HTML object """ from jinja2 import Template import pkgutil template = Template(pkgutil.get_data(package_name, tmpl_path).decode()) # path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path # template = Template(open(path).read()) html = template.render(**keywords) return html
[ "def", "generate_html", "(", "keywords", ",", "tmpl_path", ",", "package_name", "=", "'ecell4.util'", ")", ":", "from", "jinja2", "import", "Template", "import", "pkgutil", "template", "=", "Template", "(", "pkgutil", ".", "get_data", "(", "package_name", ",", ...
Generate static html file from JSON model and its own id. Parameters ---------- model : dict JSON model from which ecell4.viz generates a plot. model_id : string Unique id for the plot. Returns ------- html : A HTML object
[ "Generate", "static", "html", "file", "from", "JSON", "model", "and", "its", "own", "id", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L802-L825
train
ecell/ecell4
ecell4/util/viz.py
plot_trajectory2d_with_matplotlib
def plot_trajectory2d_with_matplotlib( obs, plane='xy', max_count=10, figsize=6, legend=True, wireframe=False, grid=True, noaxis=False, plot_range=None, **kwargs): """ Make a 2D plot from received instance of TrajectoryObserver and show it on IPython notebook. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. plane : str, default 'xy' 'xy', 'yz', 'zx'. max_count : Integer, default 10 The maximum number of particles to show. If None, show all. figsize : float, default 6 Size of the plotting area. Given in inch. legend : bool, default True plot_range : tuple, default None Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez). If None, the minimum volume containing all the trajectories is used. """ import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) data = obs.data() if max_count is not None and len(data) > max_count: data = random.sample(data, max_count) wrange = __get_range_of_trajectories(data, plot_range) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) ax.set_xlabel(plane[0].upper()) ax.set_ylabel(plane[1].upper()) lines = [] for i, y in enumerate(data): xarr, yarr, zarr = [], [], [] for pos in y: xarr.append(pos[xidx]) yarr.append(pos[yidx]) lines.append((xarr, yarr)) __plot_trajectory2d_with_matplotlib(lines, ax, **kwargs) # if legend: # ax.legend(loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {"loc": "best", "shadow": True} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) plt.show()
python
def plot_trajectory2d_with_matplotlib( obs, plane='xy', max_count=10, figsize=6, legend=True, wireframe=False, grid=True, noaxis=False, plot_range=None, **kwargs): """ Make a 2D plot from received instance of TrajectoryObserver and show it on IPython notebook. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. plane : str, default 'xy' 'xy', 'yz', 'zx'. max_count : Integer, default 10 The maximum number of particles to show. If None, show all. figsize : float, default 6 Size of the plotting area. Given in inch. legend : bool, default True plot_range : tuple, default None Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez). If None, the minimum volume containing all the trajectories is used. """ import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) data = obs.data() if max_count is not None and len(data) > max_count: data = random.sample(data, max_count) wrange = __get_range_of_trajectories(data, plot_range) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) ax.set_xlabel(plane[0].upper()) ax.set_ylabel(plane[1].upper()) lines = [] for i, y in enumerate(data): xarr, yarr, zarr = [], [], [] for pos in y: xarr.append(pos[xidx]) yarr.append(pos[yidx]) lines.append((xarr, yarr)) __plot_trajectory2d_with_matplotlib(lines, ax, **kwargs) # if legend: # ax.legend(loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {"loc": "best", "shadow": True} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) plt.show()
[ "def", "plot_trajectory2d_with_matplotlib", "(", "obs", ",", "plane", "=", "'xy'", ",", "max_count", "=", "10", ",", "figsize", "=", "6", ",", "legend", "=", "True", ",", "wireframe", "=", "False", ",", "grid", "=", "True", ",", "noaxis", "=", "False", ...
Make a 2D plot from received instance of TrajectoryObserver and show it on IPython notebook. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. plane : str, default 'xy' 'xy', 'yz', 'zx'. max_count : Integer, default 10 The maximum number of particles to show. If None, show all. figsize : float, default 6 Size of the plotting area. Given in inch. legend : bool, default True plot_range : tuple, default None Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez). If None, the minimum volume containing all the trajectories is used.
[ "Make", "a", "2D", "plot", "from", "received", "instance", "of", "TrajectoryObserver", "and", "show", "it", "on", "IPython", "notebook", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L1243-L1304
train
ecell/ecell4
ecell4/util/viz.py
plot_world2d_with_matplotlib
def plot_world2d_with_matplotlib( world, plane='xy', marker_size=3, figsize=6, grid=True, wireframe=False, species_list=None, max_count=1000, angle=None, legend=True, noaxis=False, scale=1.0, **kwargs): """ Make a 2D plot from received instance of World and show it on IPython notebook. Parameters ---------- world : World or str World to render. A HDF5 filename is also acceptable. plane : str, default 'xy' 'xy', 'yz', 'zx'. marker_size : float, default 3 Marker size for all species. Size is passed to scatter function as argument, s=(2 ** marker_size). figsize : float, default 6 Size of the plotting area. Given in inch. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. None means no limitation. angle : tuple, default None A tuple of view angle which is given as (azim, elev, dist). If None, use default assumed to be (-60, 30, 10). legend : bool, default True scale : float, default 1 A length-scaling factor """ import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) if species_list is None: species_list = [p.species().serial() for pid, p in world.list_particles()] species_list = sorted( set(species_list), key=species_list.index) # XXX: pick unique ones wrange = __get_range_of_world(world, scale) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) scatters, plots = __scatter_world2d_with_matplotlib( world, (xidx, yidx), ax, species_list, marker_size, max_count, scale, **kwargs) ax.set_xlabel(plane[0].upper()) 
ax.set_ylabel(plane[1].upper()) # if legend: # ax.legend(handles=plots, labels=species_list, loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5), 'shadow': False, 'frameon': False, 'fontsize': 'x-large', 'scatterpoints': 1} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) # ax.legend(handles=plots, labels=species_list, **legend_opts) plt.show()
python
def plot_world2d_with_matplotlib( world, plane='xy', marker_size=3, figsize=6, grid=True, wireframe=False, species_list=None, max_count=1000, angle=None, legend=True, noaxis=False, scale=1.0, **kwargs): """ Make a 2D plot from received instance of World and show it on IPython notebook. Parameters ---------- world : World or str World to render. A HDF5 filename is also acceptable. plane : str, default 'xy' 'xy', 'yz', 'zx'. marker_size : float, default 3 Marker size for all species. Size is passed to scatter function as argument, s=(2 ** marker_size). figsize : float, default 6 Size of the plotting area. Given in inch. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. None means no limitation. angle : tuple, default None A tuple of view angle which is given as (azim, elev, dist). If None, use default assumed to be (-60, 30, 10). legend : bool, default True scale : float, default 1 A length-scaling factor """ import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) if species_list is None: species_list = [p.species().serial() for pid, p in world.list_particles()] species_list = sorted( set(species_list), key=species_list.index) # XXX: pick unique ones wrange = __get_range_of_world(world, scale) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) scatters, plots = __scatter_world2d_with_matplotlib( world, (xidx, yidx), ax, species_list, marker_size, max_count, scale, **kwargs) ax.set_xlabel(plane[0].upper()) 
ax.set_ylabel(plane[1].upper()) # if legend: # ax.legend(handles=plots, labels=species_list, loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5), 'shadow': False, 'frameon': False, 'fontsize': 'x-large', 'scatterpoints': 1} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) # ax.legend(handles=plots, labels=species_list, **legend_opts) plt.show()
[ "def", "plot_world2d_with_matplotlib", "(", "world", ",", "plane", "=", "'xy'", ",", "marker_size", "=", "3", ",", "figsize", "=", "6", ",", "grid", "=", "True", ",", "wireframe", "=", "False", ",", "species_list", "=", "None", ",", "max_count", "=", "10...
Make a 2D plot from received instance of World and show it on IPython notebook. Parameters ---------- world : World or str World to render. A HDF5 filename is also acceptable. plane : str, default 'xy' 'xy', 'yz', 'zx'. marker_size : float, default 3 Marker size for all species. Size is passed to scatter function as argument, s=(2 ** marker_size). figsize : float, default 6 Size of the plotting area. Given in inch. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. None means no limitation. angle : tuple, default None A tuple of view angle which is given as (azim, elev, dist). If None, use default assumed to be (-60, 30, 10). legend : bool, default True scale : float, default 1 A length-scaling factor
[ "Make", "a", "2D", "plot", "from", "received", "instance", "of", "World", "and", "show", "it", "on", "IPython", "notebook", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L1908-L1974
train
ecell/ecell4
ecell4/util/viz.py
plot_world_with_plotly
def plot_world_with_plotly(world, species_list=None, max_count=1000): """ Plot a World on IPython Notebook """ if isinstance(world, str): from .simulation import load_world world = load_world(world) if species_list is None: species_list = [sp.serial() for sp in world.list_species()] species_list.sort() import random from ecell4_base.core import Species positions = {} for serial in species_list: x, y, z = [], [], [] particles = world.list_particles_exact(Species(serial)) if max_count is not None and len(particles) > max_count: particles = random.sample(particles, max_count) for pid, p in particles: pos = p.position() x.append(pos[0]) y.append(pos[1]) z.append(pos[2]) positions[serial] = (x, y, z) import plotly import plotly.graph_objs as go plotly.offline.init_notebook_mode() marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1), opacity=0.9, symbol='circle') data = [] for serial, (x, y, z) in positions.items(): trace = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=marker, name=serial) data.append(trace) layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0)) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig)
python
def plot_world_with_plotly(world, species_list=None, max_count=1000): """ Plot a World on IPython Notebook """ if isinstance(world, str): from .simulation import load_world world = load_world(world) if species_list is None: species_list = [sp.serial() for sp in world.list_species()] species_list.sort() import random from ecell4_base.core import Species positions = {} for serial in species_list: x, y, z = [], [], [] particles = world.list_particles_exact(Species(serial)) if max_count is not None and len(particles) > max_count: particles = random.sample(particles, max_count) for pid, p in particles: pos = p.position() x.append(pos[0]) y.append(pos[1]) z.append(pos[2]) positions[serial] = (x, y, z) import plotly import plotly.graph_objs as go plotly.offline.init_notebook_mode() marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1), opacity=0.9, symbol='circle') data = [] for serial, (x, y, z) in positions.items(): trace = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=marker, name=serial) data.append(trace) layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0)) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig)
[ "def", "plot_world_with_plotly", "(", "world", ",", "species_list", "=", "None", ",", "max_count", "=", "1000", ")", ":", "if", "isinstance", "(", "world", ",", "str", ")", ":", "from", ".", "simulation", "import", "load_world", "world", "=", "load_world", ...
Plot a World on IPython Notebook
[ "Plot", "a", "World", "on", "IPython", "Notebook" ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L2136-L2182
train
ecell/ecell4
ecell4/extra/_unit.py
getUnitRegistry
def getUnitRegistry(length="meter", time="second", substance="item", volume=None, other=()): """Return a pint.UnitRegistry made compatible with ecell4. Parameters ---------- length : str, optional A default unit for '[length]'. 'meter' is its default. time : str, optional A default unit for '[time]'. 'second' is its default. substance : str, optional A default unit for '[substance]' (the number of molecules). 'item' is its default. volume : str, optional A default unit for '[volume]'. Its default is None, thus '[length]**3'. other : tuple, optional A list of user-defined default units other than the above. Returns ------- ureg : pint.UnitRegistry """ ureg = pint.UnitRegistry() ureg.define('item = mole / (avogadro_number * 1 mole)') try: pint.molar # except UndefinedUnitError: except AttributeError: # https://github.com/hgrecco/pint/blob/master/pint/default_en.txt#L75-L77 ureg.define('[concentration] = [substance] / [volume]') ureg.define('molar = mol / (1e-3 * m ** 3) = M') base_units = [unit for unit in (length, time, substance, volume) if unit is not None] base_units.extend(other) _ = ureg.System.from_lines( ["@system local using international"] + base_units, ureg.get_base_units) ureg.default_system = 'local' wrap_quantity(ureg.Quantity) pint.set_application_registry(ureg) # for pickling return ureg
python
def getUnitRegistry(length="meter", time="second", substance="item", volume=None, other=()): """Return a pint.UnitRegistry made compatible with ecell4. Parameters ---------- length : str, optional A default unit for '[length]'. 'meter' is its default. time : str, optional A default unit for '[time]'. 'second' is its default. substance : str, optional A default unit for '[substance]' (the number of molecules). 'item' is its default. volume : str, optional A default unit for '[volume]'. Its default is None, thus '[length]**3'. other : tuple, optional A list of user-defined default units other than the above. Returns ------- ureg : pint.UnitRegistry """ ureg = pint.UnitRegistry() ureg.define('item = mole / (avogadro_number * 1 mole)') try: pint.molar # except UndefinedUnitError: except AttributeError: # https://github.com/hgrecco/pint/blob/master/pint/default_en.txt#L75-L77 ureg.define('[concentration] = [substance] / [volume]') ureg.define('molar = mol / (1e-3 * m ** 3) = M') base_units = [unit for unit in (length, time, substance, volume) if unit is not None] base_units.extend(other) _ = ureg.System.from_lines( ["@system local using international"] + base_units, ureg.get_base_units) ureg.default_system = 'local' wrap_quantity(ureg.Quantity) pint.set_application_registry(ureg) # for pickling return ureg
[ "def", "getUnitRegistry", "(", "length", "=", "\"meter\"", ",", "time", "=", "\"second\"", ",", "substance", "=", "\"item\"", ",", "volume", "=", "None", ",", "other", "=", "(", ")", ")", ":", "ureg", "=", "pint", ".", "UnitRegistry", "(", ")", "ureg",...
Return a pint.UnitRegistry made compatible with ecell4. Parameters ---------- length : str, optional A default unit for '[length]'. 'meter' is its default. time : str, optional A default unit for '[time]'. 'second' is its default. substance : str, optional A default unit for '[substance]' (the number of molecules). 'item' is its default. volume : str, optional A default unit for '[volume]'. Its default is None, thus '[length]**3'. other : tuple, optional A list of user-defined default units other than the above. Returns ------- ureg : pint.UnitRegistry
[ "Return", "a", "pint", ".", "UnitRegistry", "made", "compatible", "with", "ecell4", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/_unit.py#L32-L74
train
ecell/ecell4
ecell4/datasource/biogrid.py
biogridDataSource.interactor
def interactor(self, geneList=None, org=None): """ Supposing geneList returns an unique item. """ geneList = geneList or [] organisms = organisms or [] querydata = self.interactions(geneList, org) returnData = {} for i in querydata: if not returnData.get(i["symB"]["name"]): returnData[i["symB"]["name"]] = {"interactions": []} returnData[i["symB"]["name"]]["interactions"].append(i) return returnData
python
def interactor(self, geneList=None, org=None): """ Supposing geneList returns an unique item. """ geneList = geneList or [] organisms = organisms or [] querydata = self.interactions(geneList, org) returnData = {} for i in querydata: if not returnData.get(i["symB"]["name"]): returnData[i["symB"]["name"]] = {"interactions": []} returnData[i["symB"]["name"]]["interactions"].append(i) return returnData
[ "def", "interactor", "(", "self", ",", "geneList", "=", "None", ",", "org", "=", "None", ")", ":", "geneList", "=", "geneList", "or", "[", "]", "organisms", "=", "organisms", "or", "[", "]", "querydata", "=", "self", ".", "interactions", "(", "geneList...
Supposing geneList returns an unique item.
[ "Supposing", "geneList", "returns", "an", "unique", "item", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/datasource/biogrid.py#L96-L109
train
ecell/ecell4
ecell4/util/ports.py
save_sbml
def save_sbml(filename, model, y0=None, volume=1.0, is_valid=True): """ Save a model in the SBML format. Parameters ---------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. is_valid : bool, optional Check if the generated model is valid. True as a default. """ y0 = y0 or {} import libsbml document = export_sbml(model, y0, volume, is_valid) # with open(filename, 'w') as fout: # fout.write(libsbml.writeSBMLToString(document)) # writer = libsbml.SBMLWriter() # writer.writeSBML(document, filename) libsbml.writeSBML(document, filename)
python
def save_sbml(filename, model, y0=None, volume=1.0, is_valid=True): """ Save a model in the SBML format. Parameters ---------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. is_valid : bool, optional Check if the generated model is valid. True as a default. """ y0 = y0 or {} import libsbml document = export_sbml(model, y0, volume, is_valid) # with open(filename, 'w') as fout: # fout.write(libsbml.writeSBMLToString(document)) # writer = libsbml.SBMLWriter() # writer.writeSBML(document, filename) libsbml.writeSBML(document, filename)
[ "def", "save_sbml", "(", "filename", ",", "model", ",", "y0", "=", "None", ",", "volume", "=", "1.0", ",", "is_valid", "=", "True", ")", ":", "y0", "=", "y0", "or", "{", "}", "import", "libsbml", "document", "=", "export_sbml", "(", "model", ",", "...
Save a model in the SBML format. Parameters ---------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. is_valid : bool, optional Check if the generated model is valid. True as a default.
[ "Save", "a", "model", "in", "the", "SBML", "format", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/ports.py#L220-L245
train
ecell/ecell4
ecell4/util/ports.py
load_sbml
def load_sbml(filename): """ Load a model from a SBML file. Parameters ---------- filename : str The input SBML filename. Returns ------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. """ import libsbml document = libsbml.readSBML(filename) document.validateSBML() num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR) + document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)) if num_errors > 0: messages = "The generated document is not valid." messages += " {} errors were found:\n".format(num_errors) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) raise RuntimeError(messages) return import_sbml(document)
python
def load_sbml(filename): """ Load a model from a SBML file. Parameters ---------- filename : str The input SBML filename. Returns ------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. """ import libsbml document = libsbml.readSBML(filename) document.validateSBML() num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR) + document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)) if num_errors > 0: messages = "The generated document is not valid." messages += " {} errors were found:\n".format(num_errors) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) raise RuntimeError(messages) return import_sbml(document)
[ "def", "load_sbml", "(", "filename", ")", ":", "import", "libsbml", "document", "=", "libsbml", ".", "readSBML", "(", "filename", ")", "document", ".", "validateSBML", "(", ")", "num_errors", "=", "(", "document", ".", "getNumErrors", "(", "libsbml", ".", ...
Load a model from a SBML file. Parameters ---------- filename : str The input SBML filename. Returns ------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume.
[ "Load", "a", "model", "from", "a", "SBML", "file", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/ports.py#L377-L411
train
ecell/ecell4
ecell4/util/decorator.py
get_model
def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False): """ Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTIONRULES``. Parameters ---------- is_netfree : bool, optional Return ``NetfreeModel`` if True, and ``NetworkModel`` if else. Default is False. without_reset : bool, optional Do not reset the global variables after the generation if True. Default is False. seeds : list, optional A list of seed ``Species`` for expanding the model. If this is not None, generate a ``NetfreeModel`` once, and return a ``NetworkModel``, which is an expanded form of that with the given seeds. Default is None. effective : bool, optional See ``NetfreeModel.effective`` and ``Netfree.set_effective``. Only meaningfull with option ``is_netfree=True``. Default is False Returns ------- model : NetworkModel, NetfreeModel """ try: if seeds is not None or is_netfree: m = ecell4_base.core.NetfreeModel() else: m = ecell4_base.core.NetworkModel() for sp in SPECIES_ATTRIBUTES: m.add_species_attribute(sp) for rr in REACTION_RULES: m.add_reaction_rule(rr) if not without_reset: reset_model() if seeds is not None: return m.expand(seeds) if isinstance(m, ecell4_base.core.NetfreeModel): m.set_effective(effective) except Exception as e: reset_model() raise e return m
python
def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False): """ Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTIONRULES``. Parameters ---------- is_netfree : bool, optional Return ``NetfreeModel`` if True, and ``NetworkModel`` if else. Default is False. without_reset : bool, optional Do not reset the global variables after the generation if True. Default is False. seeds : list, optional A list of seed ``Species`` for expanding the model. If this is not None, generate a ``NetfreeModel`` once, and return a ``NetworkModel``, which is an expanded form of that with the given seeds. Default is None. effective : bool, optional See ``NetfreeModel.effective`` and ``Netfree.set_effective``. Only meaningfull with option ``is_netfree=True``. Default is False Returns ------- model : NetworkModel, NetfreeModel """ try: if seeds is not None or is_netfree: m = ecell4_base.core.NetfreeModel() else: m = ecell4_base.core.NetworkModel() for sp in SPECIES_ATTRIBUTES: m.add_species_attribute(sp) for rr in REACTION_RULES: m.add_reaction_rule(rr) if not without_reset: reset_model() if seeds is not None: return m.expand(seeds) if isinstance(m, ecell4_base.core.NetfreeModel): m.set_effective(effective) except Exception as e: reset_model() raise e return m
[ "def", "get_model", "(", "is_netfree", "=", "False", ",", "without_reset", "=", "False", ",", "seeds", "=", "None", ",", "effective", "=", "False", ")", ":", "try", ":", "if", "seeds", "is", "not", "None", "or", "is_netfree", ":", "m", "=", "ecell4_bas...
Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTIONRULES``. Parameters ---------- is_netfree : bool, optional Return ``NetfreeModel`` if True, and ``NetworkModel`` if else. Default is False. without_reset : bool, optional Do not reset the global variables after the generation if True. Default is False. seeds : list, optional A list of seed ``Species`` for expanding the model. If this is not None, generate a ``NetfreeModel`` once, and return a ``NetworkModel``, which is an expanded form of that with the given seeds. Default is None. effective : bool, optional See ``NetfreeModel.effective`` and ``Netfree.set_effective``. Only meaningfull with option ``is_netfree=True``. Default is False Returns ------- model : NetworkModel, NetfreeModel
[ "Generate", "a", "model", "with", "parameters", "in", "the", "global", "scope", "SPECIES_ATTRIBUTES", "and", "REACTIONRULES", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/decorator.py#L143-L194
train
ecell/ecell4
ecell4/extra/ensemble.py
run_serial
def run_serial(target, jobs, n=1, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in series. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_serial(target, jobs) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_serial(target, jobs, n=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] >>> seeds = genseeds(3) >>> def target(arg, job_id, task_id): ... from ecell4.extra.ensemble import getseed ... return getseed(arg, task_id) >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP [[127152315, 2028054913, 253611282]] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure """ return [[target(copy.copy(job), i + 1, j + 1) for j in range(n)] for i, job in enumerate(jobs)]
python
def run_serial(target, jobs, n=1, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in series. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_serial(target, jobs) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_serial(target, jobs, n=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] >>> seeds = genseeds(3) >>> def target(arg, job_id, task_id): ... from ecell4.extra.ensemble import getseed ... return getseed(arg, task_id) >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP [[127152315, 2028054913, 253611282]] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure """ return [[target(copy.copy(job), i + 1, j + 1) for j in range(n)] for i, job in enumerate(jobs)]
[ "def", "run_serial", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "**", "kwargs", ")", ":", "return", "[", "[", "target", "(", "copy", ".", "copy", "(", "job", ")", ",", "i", "+", "1", ",", "j", "+", "1", ")", "for", "j", "in", "ran...
Evaluate the given function with each set of arguments, and return a list of results. This function does in series. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_serial(target, jobs) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_serial(target, jobs, n=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] >>> seeds = genseeds(3) >>> def target(arg, job_id, task_id): ... from ecell4.extra.ensemble import getseed ... return getseed(arg, task_id) >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP [[127152315, 2028054913, 253611282]] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "series", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L22-L71
train
ecell/ecell4
ecell4/extra/ensemble.py
run_multiprocessing
def run_multiprocessing(target, jobs, n=1, nproc=None, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel by using `multiprocessing`. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. All the argument must be picklable. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. nproc : int, optional A number of cores available once. If nothing is given, all available cores are used. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_multiprocessing(target, jobs, nproc=2) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_multiprocessing(target, jobs, n=2, nproc=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure """ def consumer(f, q_in, q_out): while True: val = q_in.get() if val is None: q_in.task_done() break i, x = val res = (i, f(*x)) q_in.task_done() q_out.put(res) def mulpmap(f, X, nproc): nproc = nproc or multiprocessing.cpu_count() q_in = multiprocessing.JoinableQueue() q_out = multiprocessing.Queue() workers = [multiprocessing.Process(target=consumer, args=(f, q_in, q_out), daemon=True) for _ in range(nproc)] sent = [q_in.put((i, x)) for i, x in enumerate(X)] num_tasks = len(sent) [q_in.put(None) for _ in range(nproc)] #XXX: poison pill [w.start() for w in workers] 
# [w.join() for w in workers] q_in.join() res = [q_out.get() for _ in range(num_tasks)] return [x for (_, x) in sorted(res)] res = mulpmap( target, ((job, i + 1, j + 1) for (i, job), j in itertools.product(enumerate(jobs), range(n))), nproc) return [res[i: i + n] for i in range(0, len(res), n)]
python
def run_multiprocessing(target, jobs, n=1, nproc=None, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel by using `multiprocessing`. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. All the argument must be picklable. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. nproc : int, optional A number of cores available once. If nothing is given, all available cores are used. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_multiprocessing(target, jobs, nproc=2) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_multiprocessing(target, jobs, n=2, nproc=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure """ def consumer(f, q_in, q_out): while True: val = q_in.get() if val is None: q_in.task_done() break i, x = val res = (i, f(*x)) q_in.task_done() q_out.put(res) def mulpmap(f, X, nproc): nproc = nproc or multiprocessing.cpu_count() q_in = multiprocessing.JoinableQueue() q_out = multiprocessing.Queue() workers = [multiprocessing.Process(target=consumer, args=(f, q_in, q_out), daemon=True) for _ in range(nproc)] sent = [q_in.put((i, x)) for i, x in enumerate(X)] num_tasks = len(sent) [q_in.put(None) for _ in range(nproc)] #XXX: poison pill [w.start() for w in workers] 
# [w.join() for w in workers] q_in.join() res = [q_out.get() for _ in range(num_tasks)] return [x for (_, x) in sorted(res)] res = mulpmap( target, ((job, i + 1, j + 1) for (i, job), j in itertools.product(enumerate(jobs), range(n))), nproc) return [res[i: i + n] for i in range(0, len(res), n)]
[ "def", "run_multiprocessing", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "nproc", "=", "None", ",", "**", "kwargs", ")", ":", "def", "consumer", "(", "f", ",", "q_in", ",", "q_out", ")", ":", "while", "True", ":", "val", "=", "q_in", "....
Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel by using `multiprocessing`. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. All the argument must be picklable. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. nproc : int, optional A number of cores available once. If nothing is given, all available cores are used. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_multiprocessing(target, jobs, nproc=2) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_multiprocessing(target, jobs, n=2, nproc=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "parallel", "by", "using", "multiprocessing", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L73-L147
train
ecell/ecell4
ecell4/extra/ensemble.py
run_azure
def run_azure(target, jobs, n=1, nproc=None, path='.', delete=True, config=None, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel with Microsoft Azure Batch. This function is the work in progress. The argument `nproc` doesn't work yet. See `ecell4.extra.azure_batch.run_azure` for details. See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure ecell4.extra.azure_batch.run_azure """ import ecell4.extra.azure_batch as azure_batch return azure_batch.run_azure(target, jobs, n, path, delete, config)
python
def run_azure(target, jobs, n=1, nproc=None, path='.', delete=True, config=None, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel with Microsoft Azure Batch. This function is the work in progress. The argument `nproc` doesn't work yet. See `ecell4.extra.azure_batch.run_azure` for details. See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure ecell4.extra.azure_batch.run_azure """ import ecell4.extra.azure_batch as azure_batch return azure_batch.run_azure(target, jobs, n, path, delete, config)
[ "def", "run_azure", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "nproc", "=", "None", ",", "path", "=", "'.'", ",", "delete", "=", "True", ",", "config", "=", "None", ",", "**", "kwargs", ")", ":", "import", "ecell4", ".", "extra", ".", ...
Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel with Microsoft Azure Batch. This function is the work in progress. The argument `nproc` doesn't work yet. See `ecell4.extra.azure_batch.run_azure` for details. See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure ecell4.extra.azure_batch.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "parallel", "with", "Microsoft", "Azure", "Batch", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L503-L523
train
ecell/ecell4
ecell4/extra/ensemble.py
getseed
def getseed(myseed, i): """ Return a single seed from a long seed given by `genseeds`. Parameters ---------- myseed : bytes A long seed given by `genseeds(n)`. i : int An index less than n. Returns ------- rndseed : int A seed (less than (2 ** 31)) """ rndseed = int(myseed[(i - 1) * 8: i * 8], 16) rndseed = rndseed % (2 ** 31) #XXX: trancate the first bit return rndseed
python
def getseed(myseed, i): """ Return a single seed from a long seed given by `genseeds`. Parameters ---------- myseed : bytes A long seed given by `genseeds(n)`. i : int An index less than n. Returns ------- rndseed : int A seed (less than (2 ** 31)) """ rndseed = int(myseed[(i - 1) * 8: i * 8], 16) rndseed = rndseed % (2 ** 31) #XXX: trancate the first bit return rndseed
[ "def", "getseed", "(", "myseed", ",", "i", ")", ":", "rndseed", "=", "int", "(", "myseed", "[", "(", "i", "-", "1", ")", "*", "8", ":", "i", "*", "8", "]", ",", "16", ")", "rndseed", "=", "rndseed", "%", "(", "2", "**", "31", ")", "return",...
Return a single seed from a long seed given by `genseeds`. Parameters ---------- myseed : bytes A long seed given by `genseeds(n)`. i : int An index less than n. Returns ------- rndseed : int A seed (less than (2 ** 31))
[ "Return", "a", "single", "seed", "from", "a", "long", "seed", "given", "by", "genseeds", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L543-L562
train
ecell/ecell4
ecell4/extra/ensemble.py
list_species
def list_species(model, seeds=None): """This function is deprecated.""" seeds = None or [] from ecell4_base.core import Species if not isinstance(seeds, list): seeds = list(seeds) expanded = model.expand([Species(serial) for serial in seeds]) species_list = [sp.serial() for sp in expanded.list_species()] species_list = sorted(set(seeds + species_list)) return species_list
python
def list_species(model, seeds=None): """This function is deprecated.""" seeds = None or [] from ecell4_base.core import Species if not isinstance(seeds, list): seeds = list(seeds) expanded = model.expand([Species(serial) for serial in seeds]) species_list = [sp.serial() for sp in expanded.list_species()] species_list = sorted(set(seeds + species_list)) return species_list
[ "def", "list_species", "(", "model", ",", "seeds", "=", "None", ")", ":", "seeds", "=", "None", "or", "[", "]", "from", "ecell4_base", ".", "core", "import", "Species", "if", "not", "isinstance", "(", "seeds", ",", "list", ")", ":", "seeds", "=", "li...
This function is deprecated.
[ "This", "function", "is", "deprecated", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L582-L594
train
dlon/html2markdown
html2markdown.py
_escapeCharacters
def _escapeCharacters(tag): """non-recursively escape underlines and asterisks in the tag""" for i,c in enumerate(tag.contents): if type(c) != bs4.element.NavigableString: continue c.replace_with(_escapeCharSub(r'\\\1', c))
python
def _escapeCharacters(tag): """non-recursively escape underlines and asterisks in the tag""" for i,c in enumerate(tag.contents): if type(c) != bs4.element.NavigableString: continue c.replace_with(_escapeCharSub(r'\\\1', c))
[ "def", "_escapeCharacters", "(", "tag", ")", ":", "for", "i", ",", "c", "in", "enumerate", "(", "tag", ".", "contents", ")", ":", "if", "type", "(", "c", ")", "!=", "bs4", ".", "element", ".", "NavigableString", ":", "continue", "c", ".", "replace_wi...
non-recursively escape underlines and asterisks in the tag
[ "non", "-", "recursively", "escape", "underlines", "and", "asterisks", "in", "the", "tag" ]
5946da7136e69a67b3dd37fd0e896be4d6a5b482
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L148-L154
train
dlon/html2markdown
html2markdown.py
_breakRemNewlines
def _breakRemNewlines(tag): """non-recursively break spaces and remove newlines in the tag""" for i,c in enumerate(tag.contents): if type(c) != bs4.element.NavigableString: continue c.replace_with(re.sub(r' {2,}', ' ', c).replace('\n',''))
python
def _breakRemNewlines(tag): """non-recursively break spaces and remove newlines in the tag""" for i,c in enumerate(tag.contents): if type(c) != bs4.element.NavigableString: continue c.replace_with(re.sub(r' {2,}', ' ', c).replace('\n',''))
[ "def", "_breakRemNewlines", "(", "tag", ")", ":", "for", "i", ",", "c", "in", "enumerate", "(", "tag", ".", "contents", ")", ":", "if", "type", "(", "c", ")", "!=", "bs4", ".", "element", ".", "NavigableString", ":", "continue", "c", ".", "replace_wi...
non-recursively break spaces and remove newlines in the tag
[ "non", "-", "recursively", "break", "spaces", "and", "remove", "newlines", "in", "the", "tag" ]
5946da7136e69a67b3dd37fd0e896be4d6a5b482
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L156-L161
train
dlon/html2markdown
html2markdown.py
convert
def convert(html): """converts an html string to markdown while preserving unsupported markup.""" bs = BeautifulSoup(html, 'html.parser') _markdownify(bs) ret = unicode(bs).replace(u'\xa0', '&nbsp;') ret = re.sub(r'\n{3,}', r'\n\n', ret) # ! FIXME: hack ret = re.sub(r'&lt;&lt;&lt;FLOATING LINK: (.+)&gt;&gt;&gt;', r'<\1>', ret) # ! FIXME: hack sp = re.split(r'(&lt;&lt;&lt;BLOCKQUOTE: .*?&gt;&gt;&gt;)', ret, flags=re.DOTALL) for i,e in enumerate(sp): if e[:len('&lt;&lt;&lt;BLOCKQUOTE:')] == '&lt;&lt;&lt;BLOCKQUOTE:': sp[i] = '> ' + e[len('&lt;&lt;&lt;BLOCKQUOTE:') : -len('&gt;&gt;&gt;')] sp[i] = sp[i].replace('\n', '\n> ') ret = ''.join(sp) return ret.strip('\n')
python
def convert(html): """converts an html string to markdown while preserving unsupported markup.""" bs = BeautifulSoup(html, 'html.parser') _markdownify(bs) ret = unicode(bs).replace(u'\xa0', '&nbsp;') ret = re.sub(r'\n{3,}', r'\n\n', ret) # ! FIXME: hack ret = re.sub(r'&lt;&lt;&lt;FLOATING LINK: (.+)&gt;&gt;&gt;', r'<\1>', ret) # ! FIXME: hack sp = re.split(r'(&lt;&lt;&lt;BLOCKQUOTE: .*?&gt;&gt;&gt;)', ret, flags=re.DOTALL) for i,e in enumerate(sp): if e[:len('&lt;&lt;&lt;BLOCKQUOTE:')] == '&lt;&lt;&lt;BLOCKQUOTE:': sp[i] = '> ' + e[len('&lt;&lt;&lt;BLOCKQUOTE:') : -len('&gt;&gt;&gt;')] sp[i] = sp[i].replace('\n', '\n> ') ret = ''.join(sp) return ret.strip('\n')
[ "def", "convert", "(", "html", ")", ":", "bs", "=", "BeautifulSoup", "(", "html", ",", "'html.parser'", ")", "_markdownify", "(", "bs", ")", "ret", "=", "unicode", "(", "bs", ")", ".", "replace", "(", "u'\\xa0'", ",", "'&nbsp;'", ")", "ret", "=", "re...
converts an html string to markdown while preserving unsupported markup.
[ "converts", "an", "html", "string", "to", "markdown", "while", "preserving", "unsupported", "markup", "." ]
5946da7136e69a67b3dd37fd0e896be4d6a5b482
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L332-L347
train
timknip/pyswf
swf/filters.py
SWFFilterFactory.create
def create(cls, type): """ Return the specified Filter """ if type == 0: return FilterDropShadow(id) elif type == 1: return FilterBlur(id) elif type == 2: return FilterGlow(id) elif type == 3: return FilterBevel(id) elif type == 4: return FilterGradientGlow(id) elif type == 5: return FilterConvolution(id) elif type == 6: return FilterColorMatrix(id) elif type == 7: return FilterGradientBevel(id) else: raise Exception("Unknown filter type: %d" % type)
python
def create(cls, type): """ Return the specified Filter """ if type == 0: return FilterDropShadow(id) elif type == 1: return FilterBlur(id) elif type == 2: return FilterGlow(id) elif type == 3: return FilterBevel(id) elif type == 4: return FilterGradientGlow(id) elif type == 5: return FilterConvolution(id) elif type == 6: return FilterColorMatrix(id) elif type == 7: return FilterGradientBevel(id) else: raise Exception("Unknown filter type: %d" % type)
[ "def", "create", "(", "cls", ",", "type", ")", ":", "if", "type", "==", "0", ":", "return", "FilterDropShadow", "(", "id", ")", "elif", "type", "==", "1", ":", "return", "FilterBlur", "(", "id", ")", "elif", "type", "==", "2", ":", "return", "Filte...
Return the specified Filter
[ "Return", "the", "specified", "Filter" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/filters.py#L220-L231
train
timknip/pyswf
swf/movie.py
SWF.export
def export(self, exporter=None, force_stroke=False): """ Export this SWF using the specified exporter. When no exporter is passed in the default exporter used is swf.export.SVGExporter. Exporters should extend the swf.export.BaseExporter class. @param exporter : the exporter to use @param force_stroke : set to true to force strokes on fills, useful for some edge cases. """ exporter = SVGExporter() if exporter is None else exporter if self._data is None: raise Exception("This SWF was not loaded! (no data)") if len(self.tags) == 0: raise Exception("This SWF doesn't contain any tags!") return exporter.export(self, force_stroke)
python
def export(self, exporter=None, force_stroke=False): """ Export this SWF using the specified exporter. When no exporter is passed in the default exporter used is swf.export.SVGExporter. Exporters should extend the swf.export.BaseExporter class. @param exporter : the exporter to use @param force_stroke : set to true to force strokes on fills, useful for some edge cases. """ exporter = SVGExporter() if exporter is None else exporter if self._data is None: raise Exception("This SWF was not loaded! (no data)") if len(self.tags) == 0: raise Exception("This SWF doesn't contain any tags!") return exporter.export(self, force_stroke)
[ "def", "export", "(", "self", ",", "exporter", "=", "None", ",", "force_stroke", "=", "False", ")", ":", "exporter", "=", "SVGExporter", "(", ")", "if", "exporter", "is", "None", "else", "exporter", "if", "self", ".", "_data", "is", "None", ":", "raise...
Export this SWF using the specified exporter. When no exporter is passed in the default exporter used is swf.export.SVGExporter. Exporters should extend the swf.export.BaseExporter class. @param exporter : the exporter to use @param force_stroke : set to true to force strokes on fills, useful for some edge cases.
[ "Export", "this", "SWF", "using", "the", "specified", "exporter", ".", "When", "no", "exporter", "is", "passed", "in", "the", "default", "exporter", "used", "is", "swf", ".", "export", ".", "SVGExporter", ".", "Exporters", "should", "extend", "the", "swf", ...
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/movie.py#L114-L131
train
timknip/pyswf
swf/movie.py
SWF.parse
def parse(self, data): """ Parses the SWF. The @data parameter can be a file object or a SWFStream """ self._data = data = data if isinstance(data, SWFStream) else SWFStream(data) self._header = SWFHeader(self._data) if self._header.compressed: temp = BytesIO() if self._header.compressed_zlib: import zlib data = data.f.read() zip = zlib.decompressobj() temp.write(zip.decompress(data)) else: import pylzma data.readUI32() #consume compressed length data = data.f.read() temp.write(pylzma.decompress(data)) temp.seek(0) data = SWFStream(temp) self._header._frame_size = data.readRECT() self._header._frame_rate = data.readFIXED8() self._header._frame_count = data.readUI16() self.parse_tags(data)
python
def parse(self, data): """ Parses the SWF. The @data parameter can be a file object or a SWFStream """ self._data = data = data if isinstance(data, SWFStream) else SWFStream(data) self._header = SWFHeader(self._data) if self._header.compressed: temp = BytesIO() if self._header.compressed_zlib: import zlib data = data.f.read() zip = zlib.decompressobj() temp.write(zip.decompress(data)) else: import pylzma data.readUI32() #consume compressed length data = data.f.read() temp.write(pylzma.decompress(data)) temp.seek(0) data = SWFStream(temp) self._header._frame_size = data.readRECT() self._header._frame_rate = data.readFIXED8() self._header._frame_count = data.readUI16() self.parse_tags(data)
[ "def", "parse", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "data", "=", "data", "if", "isinstance", "(", "data", ",", "SWFStream", ")", "else", "SWFStream", "(", "data", ")", "self", ".", "_header", "=", "SWFHeader", "(", "self", ...
Parses the SWF. The @data parameter can be a file object or a SWFStream
[ "Parses", "the", "SWF", ".", "The" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/movie.py#L137-L162
train
timknip/pyswf
swf/stream.py
int32
def int32(x): """ Return a signed or unsigned int """ if x>0xFFFFFFFF: raise OverflowError if x>0x7FFFFFFF: x=int(0x100000000-x) if x<2147483648: return -x else: return -2147483648 return x
python
def int32(x): """ Return a signed or unsigned int """ if x>0xFFFFFFFF: raise OverflowError if x>0x7FFFFFFF: x=int(0x100000000-x) if x<2147483648: return -x else: return -2147483648 return x
[ "def", "int32", "(", "x", ")", ":", "if", "x", ">", "0xFFFFFFFF", ":", "raise", "OverflowError", "if", "x", ">", "0x7FFFFFFF", ":", "x", "=", "int", "(", "0x100000000", "-", "x", ")", "if", "x", "<", "2147483648", ":", "return", "-", "x", "else", ...
Return a signed or unsigned int
[ "Return", "a", "signed", "or", "unsigned", "int" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L490-L500
train
timknip/pyswf
swf/stream.py
SWFStream.bin
def bin(self, s): """ Return a value as a binary string """ return str(s) if s<=1 else bin(s>>1) + str(s&1)
python
def bin(self, s): """ Return a value as a binary string """ return str(s) if s<=1 else bin(s>>1) + str(s&1)
[ "def", "bin", "(", "self", ",", "s", ")", ":", "return", "str", "(", "s", ")", "if", "s", "<=", "1", "else", "bin", "(", "s", ">>", "1", ")", "+", "str", "(", "s", "&", "1", ")" ]
Return a value as a binary string
[ "Return", "a", "value", "as", "a", "binary", "string" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L22-L24
train
timknip/pyswf
swf/stream.py
SWFStream.calc_max_bits
def calc_max_bits(self, signed, values): """ Calculates the maximim needed bits to represent a value """ b = 0 vmax = -10000000 for val in values: if signed: b = b | val if val >= 0 else b | ~val << 1 vmax = val if vmax < val else vmax else: b |= val; bits = 0 if b > 0: bits = len(self.bin(b)) - 2 if signed and vmax > 0 and len(self.bin(vmax)) - 2 >= bits: bits += 1 return bits
python
def calc_max_bits(self, signed, values): """ Calculates the maximim needed bits to represent a value """ b = 0 vmax = -10000000 for val in values: if signed: b = b | val if val >= 0 else b | ~val << 1 vmax = val if vmax < val else vmax else: b |= val; bits = 0 if b > 0: bits = len(self.bin(b)) - 2 if signed and vmax > 0 and len(self.bin(vmax)) - 2 >= bits: bits += 1 return bits
[ "def", "calc_max_bits", "(", "self", ",", "signed", ",", "values", ")", ":", "b", "=", "0", "vmax", "=", "-", "10000000", "for", "val", "in", "values", ":", "if", "signed", ":", "b", "=", "b", "|", "val", "if", "val", ">=", "0", "else", "b", "|...
Calculates the maximim needed bits to represent a value
[ "Calculates", "the", "maximim", "needed", "bits", "to", "represent", "a", "value" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L26-L42
train
timknip/pyswf
swf/stream.py
SWFStream.readbits
def readbits(self, bits): """ Read the specified number of bits from the stream. Returns 0 for bits == 0. """ if bits == 0: return 0 # fast byte-aligned path if bits % 8 == 0 and self._bits_pending == 0: return self._read_bytes_aligned(bits // 8) out = 0 masks = self._masks def transfer_bits(x, y, n, t): """ transfers t bits from the top of y_n to the bottom of x. then returns x and the remaining bits in y """ if n == t: # taking all return (x << t) | y, 0 mask = masks[t] # (1 << t) - 1 remainmask = masks[n - t] # (1 << n - t) - 1 taken = ((y >> n - t) & mask) return (x << t) | taken, y & remainmask while bits > 0: if self._bits_pending > 0: assert self._partial_byte is not None take = min(self._bits_pending, bits) out, self._partial_byte = transfer_bits(out, self._partial_byte, self._bits_pending, take) if take == self._bits_pending: # we took them all self._partial_byte = None self._bits_pending -= take bits -= take continue r = self.f.read(1) if r == b'': raise EOFError self._partial_byte = ord(r) self._bits_pending = 8 return out
python
def readbits(self, bits): """ Read the specified number of bits from the stream. Returns 0 for bits == 0. """ if bits == 0: return 0 # fast byte-aligned path if bits % 8 == 0 and self._bits_pending == 0: return self._read_bytes_aligned(bits // 8) out = 0 masks = self._masks def transfer_bits(x, y, n, t): """ transfers t bits from the top of y_n to the bottom of x. then returns x and the remaining bits in y """ if n == t: # taking all return (x << t) | y, 0 mask = masks[t] # (1 << t) - 1 remainmask = masks[n - t] # (1 << n - t) - 1 taken = ((y >> n - t) & mask) return (x << t) | taken, y & remainmask while bits > 0: if self._bits_pending > 0: assert self._partial_byte is not None take = min(self._bits_pending, bits) out, self._partial_byte = transfer_bits(out, self._partial_byte, self._bits_pending, take) if take == self._bits_pending: # we took them all self._partial_byte = None self._bits_pending -= take bits -= take continue r = self.f.read(1) if r == b'': raise EOFError self._partial_byte = ord(r) self._bits_pending = 8 return out
[ "def", "readbits", "(", "self", ",", "bits", ")", ":", "if", "bits", "==", "0", ":", "return", "0", "if", "bits", "%", "8", "==", "0", "and", "self", ".", "_bits_pending", "==", "0", ":", "return", "self", ".", "_read_bytes_aligned", "(", "bits", "...
Read the specified number of bits from the stream. Returns 0 for bits == 0.
[ "Read", "the", "specified", "number", "of", "bits", "from", "the", "stream", ".", "Returns", "0", "for", "bits", "==", "0", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L56-L105
train
timknip/pyswf
swf/stream.py
SWFStream.readSB
def readSB(self, bits): """ Read a signed int using the specified number of bits """ shift = 32 - bits return int32(self.readbits(bits) << shift) >> shift
python
def readSB(self, bits): """ Read a signed int using the specified number of bits """ shift = 32 - bits return int32(self.readbits(bits) << shift) >> shift
[ "def", "readSB", "(", "self", ",", "bits", ")", ":", "shift", "=", "32", "-", "bits", "return", "int32", "(", "self", ".", "readbits", "(", "bits", ")", "<<", "shift", ")", ">>", "shift" ]
Read a signed int using the specified number of bits
[ "Read", "a", "signed", "int", "using", "the", "specified", "number", "of", "bits" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L111-L114
train
timknip/pyswf
swf/stream.py
SWFStream.readEncodedU32
def readEncodedU32(self): """ Read a encoded unsigned int """ self.reset_bits_pending(); result = self.readUI8(); if result & 0x80 != 0: result = (result & 0x7f) | (self.readUI8() << 7) if result & 0x4000 != 0: result = (result & 0x3fff) | (self.readUI8() << 14) if result & 0x200000 != 0: result = (result & 0x1fffff) | (self.readUI8() << 21) if result & 0x10000000 != 0: result = (result & 0xfffffff) | (self.readUI8() << 28) return result
python
def readEncodedU32(self): """ Read a encoded unsigned int """ self.reset_bits_pending(); result = self.readUI8(); if result & 0x80 != 0: result = (result & 0x7f) | (self.readUI8() << 7) if result & 0x4000 != 0: result = (result & 0x3fff) | (self.readUI8() << 14) if result & 0x200000 != 0: result = (result & 0x1fffff) | (self.readUI8() << 21) if result & 0x10000000 != 0: result = (result & 0xfffffff) | (self.readUI8() << 28) return result
[ "def", "readEncodedU32", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "result", "=", "self", ".", "readUI8", "(", ")", "if", "result", "&", "0x80", "!=", "0", ":", "result", "=", "(", "result", "&", "0x7f", ")", "|", "(", "se...
Read a encoded unsigned int
[ "Read", "a", "encoded", "unsigned", "int" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L155-L167
train
timknip/pyswf
swf/stream.py
SWFStream.readFLOAT16
def readFLOAT16(self): """ Read a 2 byte float """ self.reset_bits_pending() word = self.readUI16() sign = -1 if ((word & 0x8000) != 0) else 1 exponent = (word >> 10) & 0x1f significand = word & 0x3ff if exponent == 0: if significand == 0: return 0.0 else: return sign * math.pow(2, 1 - SWFStream.FLOAT16_EXPONENT_BASE) * (significand / 1024.0) if exponent == 31: if significand == 0: return float('-inf') if sign < 0 else float('inf') else: return float('nan') # normal number return sign * math.pow(2, exponent - SWFStream.FLOAT16_EXPONENT_BASE) * (1 + significand / 1024.0)
python
def readFLOAT16(self): """ Read a 2 byte float """ self.reset_bits_pending() word = self.readUI16() sign = -1 if ((word & 0x8000) != 0) else 1 exponent = (word >> 10) & 0x1f significand = word & 0x3ff if exponent == 0: if significand == 0: return 0.0 else: return sign * math.pow(2, 1 - SWFStream.FLOAT16_EXPONENT_BASE) * (significand / 1024.0) if exponent == 31: if significand == 0: return float('-inf') if sign < 0 else float('inf') else: return float('nan') # normal number return sign * math.pow(2, exponent - SWFStream.FLOAT16_EXPONENT_BASE) * (1 + significand / 1024.0)
[ "def", "readFLOAT16", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "word", "=", "self", ".", "readUI16", "(", ")", "sign", "=", "-", "1", "if", "(", "(", "word", "&", "0x8000", ")", "!=", "0", ")", "else", "1", "exponent", ...
Read a 2 byte float
[ "Read", "a", "2", "byte", "float" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L174-L192
train
timknip/pyswf
swf/stream.py
SWFStream.readSTYLECHANGERECORD
def readSTYLECHANGERECORD(self, states, fill_bits, line_bits, level = 1): """ Read a SWFShapeRecordStyleChange """ return SWFShapeRecordStyleChange(self, states, fill_bits, line_bits, level)
python
def readSTYLECHANGERECORD(self, states, fill_bits, line_bits, level = 1): """ Read a SWFShapeRecordStyleChange """ return SWFShapeRecordStyleChange(self, states, fill_bits, line_bits, level)
[ "def", "readSTYLECHANGERECORD", "(", "self", ",", "states", ",", "fill_bits", ",", "line_bits", ",", "level", "=", "1", ")", ":", "return", "SWFShapeRecordStyleChange", "(", "self", ",", "states", ",", "fill_bits", ",", "line_bits", ",", "level", ")" ]
Read a SWFShapeRecordStyleChange
[ "Read", "a", "SWFShapeRecordStyleChange" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L263-L265
train
timknip/pyswf
swf/stream.py
SWFStream.readTEXTRECORD
def readTEXTRECORD(self, glyphBits, advanceBits, previousRecord=None, level=1): """ Read a SWFTextRecord """ if self.readUI8() == 0: return None else: self.seek(self.tell() - 1) return SWFTextRecord(self, glyphBits, advanceBits, previousRecord, level)
python
def readTEXTRECORD(self, glyphBits, advanceBits, previousRecord=None, level=1): """ Read a SWFTextRecord """ if self.readUI8() == 0: return None else: self.seek(self.tell() - 1) return SWFTextRecord(self, glyphBits, advanceBits, previousRecord, level)
[ "def", "readTEXTRECORD", "(", "self", ",", "glyphBits", ",", "advanceBits", ",", "previousRecord", "=", "None", ",", "level", "=", "1", ")", ":", "if", "self", ".", "readUI8", "(", ")", "==", "0", ":", "return", "None", "else", ":", "self", ".", "see...
Read a SWFTextRecord
[ "Read", "a", "SWFTextRecord" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L271-L277
train
timknip/pyswf
swf/stream.py
SWFStream.readACTIONRECORD
def readACTIONRECORD(self): """ Read a SWFActionRecord """ action = None actionCode = self.readUI8() if actionCode != 0: actionLength = self.readUI16() if actionCode >= 0x80 else 0 #print "0x%x"%actionCode, actionLength action = SWFActionFactory.create(actionCode, actionLength) action.parse(self) return action
python
def readACTIONRECORD(self): """ Read a SWFActionRecord """ action = None actionCode = self.readUI8() if actionCode != 0: actionLength = self.readUI16() if actionCode >= 0x80 else 0 #print "0x%x"%actionCode, actionLength action = SWFActionFactory.create(actionCode, actionLength) action.parse(self) return action
[ "def", "readACTIONRECORD", "(", "self", ")", ":", "action", "=", "None", "actionCode", "=", "self", ".", "readUI8", "(", ")", "if", "actionCode", "!=", "0", ":", "actionLength", "=", "self", ".", "readUI16", "(", ")", "if", "actionCode", ">=", "0x80", ...
Read a SWFActionRecord
[ "Read", "a", "SWFActionRecord" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L307-L316
train
timknip/pyswf
swf/stream.py
SWFStream.readCLIPACTIONRECORD
def readCLIPACTIONRECORD(self, version): """ Read a SWFClipActionRecord """ pos = self.tell() flags = self.readUI32() if version >= 6 else self.readUI16() if flags == 0: return None else: self.seek(pos) return SWFClipActionRecord(self, version)
python
def readCLIPACTIONRECORD(self, version): """ Read a SWFClipActionRecord """ pos = self.tell() flags = self.readUI32() if version >= 6 else self.readUI16() if flags == 0: return None else: self.seek(pos) return SWFClipActionRecord(self, version)
[ "def", "readCLIPACTIONRECORD", "(", "self", ",", "version", ")", ":", "pos", "=", "self", ".", "tell", "(", ")", "flags", "=", "self", ".", "readUI32", "(", ")", "if", "version", ">=", "6", "else", "self", ".", "readUI16", "(", ")", "if", "flags", ...
Read a SWFClipActionRecord
[ "Read", "a", "SWFClipActionRecord" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L333-L341
train
timknip/pyswf
swf/stream.py
SWFStream.readRGB
def readRGB(self): """ Read a RGB color """ self.reset_bits_pending(); r = self.readUI8() g = self.readUI8() b = self.readUI8() return (0xff << 24) | (r << 16) | (g << 8) | b
python
def readRGB(self): """ Read a RGB color """ self.reset_bits_pending(); r = self.readUI8() g = self.readUI8() b = self.readUI8() return (0xff << 24) | (r << 16) | (g << 8) | b
[ "def", "readRGB", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "r", "=", "self", ".", "readUI8", "(", ")", "g", "=", "self", ".", "readUI8", "(", ")", "b", "=", "self", ".", "readUI8", "(", ")", "return", "(", "0xff", "<<",...
Read a RGB color
[ "Read", "a", "RGB", "color" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L347-L353
train
timknip/pyswf
swf/stream.py
SWFStream.readRGBA
def readRGBA(self): """ Read a RGBA color """ self.reset_bits_pending(); r = self.readUI8() g = self.readUI8() b = self.readUI8() a = self.readUI8() return (a << 24) | (r << 16) | (g << 8) | b
python
def readRGBA(self): """ Read a RGBA color """ self.reset_bits_pending(); r = self.readUI8() g = self.readUI8() b = self.readUI8() a = self.readUI8() return (a << 24) | (r << 16) | (g << 8) | b
[ "def", "readRGBA", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "r", "=", "self", ".", "readUI8", "(", ")", "g", "=", "self", ".", "readUI8", "(", ")", "b", "=", "self", ".", "readUI8", "(", ")", "a", "=", "self", ".", "r...
Read a RGBA color
[ "Read", "a", "RGBA", "color" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L355-L362
train
timknip/pyswf
swf/stream.py
SWFStream.readString
def readString(self): """ Read a string """ s = self.f.read(1) string = b"" while ord(s) > 0: string += s s = self.f.read(1) return string.decode()
python
def readString(self): """ Read a string """ s = self.f.read(1) string = b"" while ord(s) > 0: string += s s = self.f.read(1) return string.decode()
[ "def", "readString", "(", "self", ")", ":", "s", "=", "self", ".", "f", ".", "read", "(", "1", ")", "string", "=", "b\"\"", "while", "ord", "(", "s", ")", ">", "0", ":", "string", "+=", "s", "s", "=", "self", ".", "f", ".", "read", "(", "1"...
Read a string
[ "Read", "a", "string" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L368-L375
train
timknip/pyswf
swf/stream.py
SWFStream.readFILTER
def readFILTER(self): """ Read a SWFFilter """ filterId = self.readUI8() filter = SWFFilterFactory.create(filterId) filter.parse(self) return filter
python
def readFILTER(self): """ Read a SWFFilter """ filterId = self.readUI8() filter = SWFFilterFactory.create(filterId) filter.parse(self) return filter
[ "def", "readFILTER", "(", "self", ")", ":", "filterId", "=", "self", ".", "readUI8", "(", ")", "filter", "=", "SWFFilterFactory", ".", "create", "(", "filterId", ")", "filter", ".", "parse", "(", "self", ")", "return", "filter" ]
Read a SWFFilter
[ "Read", "a", "SWFFilter" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L377-L382
train
timknip/pyswf
swf/stream.py
SWFStream.readFILTERLIST
def readFILTERLIST(self): """ Read a length-prefixed list of FILTERs """ number = self.readUI8() return [self.readFILTER() for _ in range(number)]
python
def readFILTERLIST(self): """ Read a length-prefixed list of FILTERs """ number = self.readUI8() return [self.readFILTER() for _ in range(number)]
[ "def", "readFILTERLIST", "(", "self", ")", ":", "number", "=", "self", ".", "readUI8", "(", ")", "return", "[", "self", ".", "readFILTER", "(", ")", "for", "_", "in", "range", "(", "number", ")", "]" ]
Read a length-prefixed list of FILTERs
[ "Read", "a", "length", "-", "prefixed", "list", "of", "FILTERs" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L384-L387
train
timknip/pyswf
swf/stream.py
SWFStream.readBUTTONCONDACTIONSs
def readBUTTONCONDACTIONSs(self): """ Read zero or more button-condition actions """ out = [] while 1: action = self.readBUTTONCONDACTION() if action: out.append(action) else: break return out
python
def readBUTTONCONDACTIONSs(self): """ Read zero or more button-condition actions """ out = [] while 1: action = self.readBUTTONCONDACTION() if action: out.append(action) else: break return out
[ "def", "readBUTTONCONDACTIONSs", "(", "self", ")", ":", "out", "=", "[", "]", "while", "1", ":", "action", "=", "self", ".", "readBUTTONCONDACTION", "(", ")", "if", "action", ":", "out", ".", "append", "(", "action", ")", "else", ":", "break", "return"...
Read zero or more button-condition actions
[ "Read", "zero", "or", "more", "button", "-", "condition", "actions" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L427-L436
train
timknip/pyswf
swf/stream.py
SWFStream.readtag_header
def readtag_header(self): """ Read a tag header """ pos = self.tell() tag_type_and_length = self.readUI16() tag_length = tag_type_and_length & 0x003f if tag_length == 0x3f: # The SWF10 spec sez that this is a signed int. # Shouldn't it be an unsigned int? tag_length = self.readSI32(); return SWFRecordHeader(tag_type_and_length >> 6, tag_length, self.tell() - pos)
python
def readtag_header(self): """ Read a tag header """ pos = self.tell() tag_type_and_length = self.readUI16() tag_length = tag_type_and_length & 0x003f if tag_length == 0x3f: # The SWF10 spec sez that this is a signed int. # Shouldn't it be an unsigned int? tag_length = self.readSI32(); return SWFRecordHeader(tag_type_and_length >> 6, tag_length, self.tell() - pos)
[ "def", "readtag_header", "(", "self", ")", ":", "pos", "=", "self", ".", "tell", "(", ")", "tag_type_and_length", "=", "self", ".", "readUI16", "(", ")", "tag_length", "=", "tag_type_and_length", "&", "0x003f", "if", "tag_length", "==", "0x3f", ":", "tag_l...
Read a tag header
[ "Read", "a", "tag", "header" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L459-L468
train
timknip/pyswf
swf/tag.py
SWFTimelineContainer.get_dependencies
def get_dependencies(self): """ Returns the character ids this tag refers to """ s = super(SWFTimelineContainer, self).get_dependencies() for dt in self.all_tags_of_type(DefinitionTag): s.update(dt.get_dependencies()) return s
python
def get_dependencies(self): """ Returns the character ids this tag refers to """ s = super(SWFTimelineContainer, self).get_dependencies() for dt in self.all_tags_of_type(DefinitionTag): s.update(dt.get_dependencies()) return s
[ "def", "get_dependencies", "(", "self", ")", ":", "s", "=", "super", "(", "SWFTimelineContainer", ",", "self", ")", ".", "get_dependencies", "(", ")", "for", "dt", "in", "self", ".", "all_tags_of_type", "(", "DefinitionTag", ")", ":", "s", ".", "update", ...
Returns the character ids this tag refers to
[ "Returns", "the", "character", "ids", "this", "tag", "refers", "to" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/tag.py#L151-L156
train
timknip/pyswf
swf/tag.py
SWFTimelineContainer.all_tags_of_type
def all_tags_of_type(self, type_or_types, recurse_into_sprites = True): """ Generator for all tags of the given type_or_types. Generates in breadth-first order, optionally including all sub-containers. """ for t in self.tags: if isinstance(t, type_or_types): yield t if recurse_into_sprites: for t in self.tags: # recurse into nested sprites if isinstance(t, SWFTimelineContainer): for containedtag in t.all_tags_of_type(type_or_types): yield containedtag
python
def all_tags_of_type(self, type_or_types, recurse_into_sprites = True): """ Generator for all tags of the given type_or_types. Generates in breadth-first order, optionally including all sub-containers. """ for t in self.tags: if isinstance(t, type_or_types): yield t if recurse_into_sprites: for t in self.tags: # recurse into nested sprites if isinstance(t, SWFTimelineContainer): for containedtag in t.all_tags_of_type(type_or_types): yield containedtag
[ "def", "all_tags_of_type", "(", "self", ",", "type_or_types", ",", "recurse_into_sprites", "=", "True", ")", ":", "for", "t", "in", "self", ".", "tags", ":", "if", "isinstance", "(", "t", ",", "type_or_types", ")", ":", "yield", "t", "if", "recurse_into_sp...
Generator for all tags of the given type_or_types. Generates in breadth-first order, optionally including all sub-containers.
[ "Generator", "for", "all", "tags", "of", "the", "given", "type_or_types", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/tag.py#L197-L211
train
timknip/pyswf
swf/tag.py
SWFTimelineContainer.build_dictionary
def build_dictionary(self): """ Return a dictionary of characterIds to their defining tags. """ d = {} for t in self.all_tags_of_type(DefinitionTag, recurse_into_sprites = False): if t.characterId in d: #print 'redefinition of characterId %d:' % (t.characterId) #print ' was:', d[t.characterId] #print 'redef:', t raise ValueError('illegal redefinition of character') d[t.characterId] = t return d
python
def build_dictionary(self): """ Return a dictionary of characterIds to their defining tags. """ d = {} for t in self.all_tags_of_type(DefinitionTag, recurse_into_sprites = False): if t.characterId in d: #print 'redefinition of characterId %d:' % (t.characterId) #print ' was:', d[t.characterId] #print 'redef:', t raise ValueError('illegal redefinition of character') d[t.characterId] = t return d
[ "def", "build_dictionary", "(", "self", ")", ":", "d", "=", "{", "}", "for", "t", "in", "self", ".", "all_tags_of_type", "(", "DefinitionTag", ",", "recurse_into_sprites", "=", "False", ")", ":", "if", "t", ".", "characterId", "in", "d", ":", "raise", ...
Return a dictionary of characterIds to their defining tags.
[ "Return", "a", "dictionary", "of", "characterIds", "to", "their", "defining", "tags", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/tag.py#L213-L225
train
timknip/pyswf
swf/tag.py
SWFTimelineContainer.collect_sound_streams
def collect_sound_streams(self): """ Return a list of sound streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any). """ rc = [] current_stream = None # looking in all containers for frames for tag in self.all_tags_of_type((TagSoundStreamHead, TagSoundStreamBlock)): if isinstance(tag, TagSoundStreamHead): # we have a new stream current_stream = [ tag ] rc.append(current_stream) if isinstance(tag, TagSoundStreamBlock): # we have a frame for the current stream current_stream.append(tag) return rc
python
def collect_sound_streams(self): """ Return a list of sound streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any). """ rc = [] current_stream = None # looking in all containers for frames for tag in self.all_tags_of_type((TagSoundStreamHead, TagSoundStreamBlock)): if isinstance(tag, TagSoundStreamHead): # we have a new stream current_stream = [ tag ] rc.append(current_stream) if isinstance(tag, TagSoundStreamBlock): # we have a frame for the current stream current_stream.append(tag) return rc
[ "def", "collect_sound_streams", "(", "self", ")", ":", "rc", "=", "[", "]", "current_stream", "=", "None", "for", "tag", "in", "self", ".", "all_tags_of_type", "(", "(", "TagSoundStreamHead", ",", "TagSoundStreamBlock", ")", ")", ":", "if", "isinstance", "("...
Return a list of sound streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any).
[ "Return", "a", "list", "of", "sound", "streams", "in", "this", "timeline", "and", "its", "children", ".", "The", "streams", "are", "returned", "in", "order", "with", "respect", "to", "the", "timeline", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/tag.py#L227-L247
train
timknip/pyswf
swf/tag.py
SWFTimelineContainer.collect_video_streams
def collect_video_streams(self): """ Return a list of video streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any). """ rc = [] streams_by_id = {} # scan first for all streams for t in self.all_tags_of_type(TagDefineVideoStream): stream = [ t ] streams_by_id[t.characterId] = stream rc.append(stream) # then find the frames for t in self.all_tags_of_type(TagVideoFrame): # we have a frame for the /named/ stream assert t.streamId in streams_by_id streams_by_id[t.streamId].append(t) return rc
python
def collect_video_streams(self): """ Return a list of video streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any). """ rc = [] streams_by_id = {} # scan first for all streams for t in self.all_tags_of_type(TagDefineVideoStream): stream = [ t ] streams_by_id[t.characterId] = stream rc.append(stream) # then find the frames for t in self.all_tags_of_type(TagVideoFrame): # we have a frame for the /named/ stream assert t.streamId in streams_by_id streams_by_id[t.streamId].append(t) return rc
[ "def", "collect_video_streams", "(", "self", ")", ":", "rc", "=", "[", "]", "streams_by_id", "=", "{", "}", "for", "t", "in", "self", ".", "all_tags_of_type", "(", "TagDefineVideoStream", ")", ":", "stream", "=", "[", "t", "]", "streams_by_id", "[", "t",...
Return a list of video streams in this timeline and its children. The streams are returned in order with respect to the timeline. A stream is returned as a list: the first element is the tag which introduced that stream; other elements are the tags which made up the stream body (if any).
[ "Return", "a", "list", "of", "video", "streams", "in", "this", "timeline", "and", "its", "children", ".", "The", "streams", "are", "returned", "in", "order", "with", "respect", "to", "the", "timeline", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/tag.py#L249-L273
train
timknip/pyswf
swf/export.py
SVGExporter.export
def export(self, swf, force_stroke=False): """ Exports the specified SWF to SVG. @param swf The SWF. @param force_stroke Whether to force strokes on non-stroked fills. """ self.svg = self._e.svg(version=SVG_VERSION) self.force_stroke = force_stroke self.defs = self._e.defs() self.root = self._e.g() self.svg.append(self.defs) self.svg.append(self.root) self.shape_exporter.defs = self.defs self._num_filters = 0 self.fonts = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFont)]) self.fontInfos = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFontInfo)]) # GO! super(SVGExporter, self).export(swf, force_stroke) # Setup svg @width, @height and @viewBox # and add the optional margin self.bounds = SVGBounds(self.svg) self.svg.set("width", "%dpx" % round(self.bounds.width)) self.svg.set("height", "%dpx" % round(self.bounds.height)) if self._margin > 0: self.bounds.grow(self._margin) vb = [self.bounds.minx, self.bounds.miny, self.bounds.width, self.bounds.height] self.svg.set("viewBox", "%s" % " ".join(map(str,vb))) # Return the SVG as StringIO return self._serialize()
python
def export(self, swf, force_stroke=False): """ Exports the specified SWF to SVG. @param swf The SWF. @param force_stroke Whether to force strokes on non-stroked fills. """ self.svg = self._e.svg(version=SVG_VERSION) self.force_stroke = force_stroke self.defs = self._e.defs() self.root = self._e.g() self.svg.append(self.defs) self.svg.append(self.root) self.shape_exporter.defs = self.defs self._num_filters = 0 self.fonts = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFont)]) self.fontInfos = dict([(x.characterId,x) for x in swf.all_tags_of_type(TagDefineFontInfo)]) # GO! super(SVGExporter, self).export(swf, force_stroke) # Setup svg @width, @height and @viewBox # and add the optional margin self.bounds = SVGBounds(self.svg) self.svg.set("width", "%dpx" % round(self.bounds.width)) self.svg.set("height", "%dpx" % round(self.bounds.height)) if self._margin > 0: self.bounds.grow(self._margin) vb = [self.bounds.minx, self.bounds.miny, self.bounds.width, self.bounds.height] self.svg.set("viewBox", "%s" % " ".join(map(str,vb))) # Return the SVG as StringIO return self._serialize()
[ "def", "export", "(", "self", ",", "swf", ",", "force_stroke", "=", "False", ")", ":", "self", ".", "svg", "=", "self", ".", "_e", ".", "svg", "(", "version", "=", "SVG_VERSION", ")", "self", ".", "force_stroke", "=", "force_stroke", "self", ".", "de...
Exports the specified SWF to SVG. @param swf The SWF. @param force_stroke Whether to force strokes on non-stroked fills.
[ "Exports", "the", "specified", "SWF", "to", "SVG", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/export.py#L514-L546
train
timknip/pyswf
swf/export.py
SingleShapeSVGExporterMixin.export
def export(self, swf, shape, **export_opts): """ Exports the specified shape of the SWF to SVG. @param swf The SWF. @param shape Which shape to export, either by characterId(int) or as a Tag object. """ # If `shape` is given as int, find corresponding shape tag. if isinstance(shape, Tag): shape_tag = shape else: shapes = [x for x in swf.all_tags_of_type((TagDefineShape, TagDefineSprite)) if x.characterId == shape] if len(shapes): shape_tag = shapes[0] else: raise Exception("Shape %s not found" % shape) from swf.movie import SWF # find a typical use of this shape example_place_objects = [x for x in swf.all_tags_of_type(TagPlaceObject) if x.hasCharacter and x.characterId == shape_tag.characterId] if len(example_place_objects): place_object = example_place_objects[0] characters = swf.build_dictionary() ids_to_export = place_object.get_dependencies() ids_exported = set() tags_to_export = [] # this had better form a dag! while len(ids_to_export): id = ids_to_export.pop() if id in ids_exported or id not in characters: continue tag = characters[id] ids_to_export.update(tag.get_dependencies()) tags_to_export.append(tag) ids_exported.add(id) tags_to_export.reverse() tags_to_export.append(place_object) else: place_object = TagPlaceObject() place_object.hasCharacter = True place_object.characterId = shape_tag.characterId tags_to_export = [ shape_tag, place_object ] stunt_swf = SWF() stunt_swf.tags = tags_to_export return super(SingleShapeSVGExporterMixin, self).export(stunt_swf, **export_opts)
python
def export(self, swf, shape, **export_opts): """ Exports the specified shape of the SWF to SVG. @param swf The SWF. @param shape Which shape to export, either by characterId(int) or as a Tag object. """ # If `shape` is given as int, find corresponding shape tag. if isinstance(shape, Tag): shape_tag = shape else: shapes = [x for x in swf.all_tags_of_type((TagDefineShape, TagDefineSprite)) if x.characterId == shape] if len(shapes): shape_tag = shapes[0] else: raise Exception("Shape %s not found" % shape) from swf.movie import SWF # find a typical use of this shape example_place_objects = [x for x in swf.all_tags_of_type(TagPlaceObject) if x.hasCharacter and x.characterId == shape_tag.characterId] if len(example_place_objects): place_object = example_place_objects[0] characters = swf.build_dictionary() ids_to_export = place_object.get_dependencies() ids_exported = set() tags_to_export = [] # this had better form a dag! while len(ids_to_export): id = ids_to_export.pop() if id in ids_exported or id not in characters: continue tag = characters[id] ids_to_export.update(tag.get_dependencies()) tags_to_export.append(tag) ids_exported.add(id) tags_to_export.reverse() tags_to_export.append(place_object) else: place_object = TagPlaceObject() place_object.hasCharacter = True place_object.characterId = shape_tag.characterId tags_to_export = [ shape_tag, place_object ] stunt_swf = SWF() stunt_swf.tags = tags_to_export return super(SingleShapeSVGExporterMixin, self).export(stunt_swf, **export_opts)
[ "def", "export", "(", "self", ",", "swf", ",", "shape", ",", "**", "export_opts", ")", ":", "if", "isinstance", "(", "shape", ",", "Tag", ")", ":", "shape_tag", "=", "shape", "else", ":", "shapes", "=", "[", "x", "for", "x", "in", "swf", ".", "al...
Exports the specified shape of the SWF to SVG. @param swf The SWF. @param shape Which shape to export, either by characterId(int) or as a Tag object.
[ "Exports", "the", "specified", "shape", "of", "the", "SWF", "to", "SVG", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/export.py#L827-L876
train
timknip/pyswf
swf/export.py
FrameSVGExporterMixin.export
def export(self, swf, frame, **export_opts): """ Exports a frame of the specified SWF to SVG. @param swf The SWF. @param frame Which frame to export, by 0-based index (int) """ self.wanted_frame = frame return super(FrameSVGExporterMixin, self).export(swf, *export_opts)
python
def export(self, swf, frame, **export_opts): """ Exports a frame of the specified SWF to SVG. @param swf The SWF. @param frame Which frame to export, by 0-based index (int) """ self.wanted_frame = frame return super(FrameSVGExporterMixin, self).export(swf, *export_opts)
[ "def", "export", "(", "self", ",", "swf", ",", "frame", ",", "**", "export_opts", ")", ":", "self", ".", "wanted_frame", "=", "frame", "return", "super", "(", "FrameSVGExporterMixin", ",", "self", ")", ".", "export", "(", "swf", ",", "*", "export_opts", ...
Exports a frame of the specified SWF to SVG. @param swf The SWF. @param frame Which frame to export, by 0-based index (int)
[ "Exports", "a", "frame", "of", "the", "specified", "SWF", "to", "SVG", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/export.py#L879-L886
train
scikit-hep/probfit
probfit/plotting.py
_get_args_and_errors
def _get_args_and_errors(self, minuit=None, args=None, errors=None): """ consistent algorithm to get argument and errors 1) get it from minuit if minuit is available 2) if not get it from args and errors 2.1) if args is dict parse it. 3) if all else fail get it from self.last_arg """ ret_arg = None ret_error = None if minuit is not None: # case 1 ret_arg = minuit.args ret_error = minuit.errors return ret_arg, ret_error # no minuit specified use args and errors if args is not None: if isinstance(args, dict): ret_arg = parse_arg(self, args) else: ret_arg = args else: # case 3 ret_arg = self.last_arg if errors is not None: ret_error = errors return ret_arg, ret_error
python
def _get_args_and_errors(self, minuit=None, args=None, errors=None): """ consistent algorithm to get argument and errors 1) get it from minuit if minuit is available 2) if not get it from args and errors 2.1) if args is dict parse it. 3) if all else fail get it from self.last_arg """ ret_arg = None ret_error = None if minuit is not None: # case 1 ret_arg = minuit.args ret_error = minuit.errors return ret_arg, ret_error # no minuit specified use args and errors if args is not None: if isinstance(args, dict): ret_arg = parse_arg(self, args) else: ret_arg = args else: # case 3 ret_arg = self.last_arg if errors is not None: ret_error = errors return ret_arg, ret_error
[ "def", "_get_args_and_errors", "(", "self", ",", "minuit", "=", "None", ",", "args", "=", "None", ",", "errors", "=", "None", ")", ":", "ret_arg", "=", "None", "ret_error", "=", "None", "if", "minuit", "is", "not", "None", ":", "ret_arg", "=", "minuit"...
consistent algorithm to get argument and errors 1) get it from minuit if minuit is available 2) if not get it from args and errors 2.1) if args is dict parse it. 3) if all else fail get it from self.last_arg
[ "consistent", "algorithm", "to", "get", "argument", "and", "errors", "1", ")", "get", "it", "from", "minuit", "if", "minuit", "is", "available", "2", ")", "if", "not", "get", "it", "from", "args", "and", "errors", "2", ".", "1", ")", "if", "args", "i...
de3593798ea3877dd2785062bed6877dd9058a02
https://github.com/scikit-hep/probfit/blob/de3593798ea3877dd2785062bed6877dd9058a02/probfit/plotting.py#L28-L55
train
scikit-hep/probfit
probfit/plotting.py
draw_residual
def draw_residual(x, y, yerr, xerr, show_errbars=True, ax=None, zero_line=True, grid=True, **kwargs): """Draw a residual plot on the axis. By default, if show_errbars if True, residuals are drawn as blue points with errorbars with no endcaps. If show_errbars is False, residuals are drawn as a bar graph with black bars. **Arguments** - **x** array of numbers, x-coordinates - **y** array of numbers, y-coordinates - **yerr** array of numbers, the uncertainty on the y-values - **xerr** array of numbers, the uncertainty on the x-values - **show_errbars** If True, draw the data as a bar plot, else as an errorbar plot - **ax** Optional matplotlib axis instance on which to draw the plot - **zero_line** If True, draw a red line at :math:`y = 0` along the full extent in :math:`x` - **grid** If True, draw gridlines - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or ``ax.bar`` (if ``show_errbars`` if False) **Returns** The matplotlib axis instance the plot was drawn on. """ from matplotlib import pyplot as plt ax = plt.gca() if ax is None else ax if show_errbars: plotopts = dict(fmt='b.', capsize=0) plotopts.update(kwargs) pp = ax.errorbar(x, y, yerr, xerr, zorder=0, **plotopts) else: plotopts = dict(color='k') plotopts.update(kwargs) pp = ax.bar(x - xerr, y, width=2*xerr, **plotopts) if zero_line: ax.plot([x[0] - xerr[0], x[-1] + xerr[-1]], [0, 0], 'r-', zorder=2) # Take the `grid` kwarg to mean 'add a grid if True'; if grid is False and # we called ax.grid(False) then any existing grid on ax would be turned off if grid: ax.grid(grid) return ax
python
def draw_residual(x, y, yerr, xerr, show_errbars=True, ax=None, zero_line=True, grid=True, **kwargs): """Draw a residual plot on the axis. By default, if show_errbars if True, residuals are drawn as blue points with errorbars with no endcaps. If show_errbars is False, residuals are drawn as a bar graph with black bars. **Arguments** - **x** array of numbers, x-coordinates - **y** array of numbers, y-coordinates - **yerr** array of numbers, the uncertainty on the y-values - **xerr** array of numbers, the uncertainty on the x-values - **show_errbars** If True, draw the data as a bar plot, else as an errorbar plot - **ax** Optional matplotlib axis instance on which to draw the plot - **zero_line** If True, draw a red line at :math:`y = 0` along the full extent in :math:`x` - **grid** If True, draw gridlines - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or ``ax.bar`` (if ``show_errbars`` if False) **Returns** The matplotlib axis instance the plot was drawn on. """ from matplotlib import pyplot as plt ax = plt.gca() if ax is None else ax if show_errbars: plotopts = dict(fmt='b.', capsize=0) plotopts.update(kwargs) pp = ax.errorbar(x, y, yerr, xerr, zorder=0, **plotopts) else: plotopts = dict(color='k') plotopts.update(kwargs) pp = ax.bar(x - xerr, y, width=2*xerr, **plotopts) if zero_line: ax.plot([x[0] - xerr[0], x[-1] + xerr[-1]], [0, 0], 'r-', zorder=2) # Take the `grid` kwarg to mean 'add a grid if True'; if grid is False and # we called ax.grid(False) then any existing grid on ax would be turned off if grid: ax.grid(grid) return ax
[ "def", "draw_residual", "(", "x", ",", "y", ",", "yerr", ",", "xerr", ",", "show_errbars", "=", "True", ",", "ax", "=", "None", ",", "zero_line", "=", "True", ",", "grid", "=", "True", ",", "**", "kwargs", ")", ":", "from", "matplotlib", "import", ...
Draw a residual plot on the axis. By default, if show_errbars if True, residuals are drawn as blue points with errorbars with no endcaps. If show_errbars is False, residuals are drawn as a bar graph with black bars. **Arguments** - **x** array of numbers, x-coordinates - **y** array of numbers, y-coordinates - **yerr** array of numbers, the uncertainty on the y-values - **xerr** array of numbers, the uncertainty on the x-values - **show_errbars** If True, draw the data as a bar plot, else as an errorbar plot - **ax** Optional matplotlib axis instance on which to draw the plot - **zero_line** If True, draw a red line at :math:`y = 0` along the full extent in :math:`x` - **grid** If True, draw gridlines - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or ``ax.bar`` (if ``show_errbars`` if False) **Returns** The matplotlib axis instance the plot was drawn on.
[ "Draw", "a", "residual", "plot", "on", "the", "axis", "." ]
de3593798ea3877dd2785062bed6877dd9058a02
https://github.com/scikit-hep/probfit/blob/de3593798ea3877dd2785062bed6877dd9058a02/probfit/plotting.py#L135-L193
train
scikit-hep/probfit
probfit/plotting.py
draw_pdf
def draw_pdf(f, arg, bound, bins=100, scale=1.0, density=True, normed_pdf=False, ax=None, **kwds): """ draw pdf with given argument and bounds. **Arguments** * **f** your pdf. The first argument is assumed to be independent variable * **arg** argument can be tuple or list * **bound** tuple(xmin,xmax) * **bins** number of bins to plot pdf. Default 100. * **scale** multiply pdf by given number. Default 1.0. * **density** plot density instead of expected count in each bin (pdf*bin width). Default True. * **normed_pdf** Normalize pdf in given bound. Default False * The rest of keyword argument will be pass to pyplot.plot **Returns** x, y of what's being plot """ edges = np.linspace(bound[0], bound[1], bins) return draw_pdf_with_edges(f, arg, edges, ax=ax, scale=scale, density=density, normed_pdf=normed_pdf, **kwds)
python
def draw_pdf(f, arg, bound, bins=100, scale=1.0, density=True, normed_pdf=False, ax=None, **kwds): """ draw pdf with given argument and bounds. **Arguments** * **f** your pdf. The first argument is assumed to be independent variable * **arg** argument can be tuple or list * **bound** tuple(xmin,xmax) * **bins** number of bins to plot pdf. Default 100. * **scale** multiply pdf by given number. Default 1.0. * **density** plot density instead of expected count in each bin (pdf*bin width). Default True. * **normed_pdf** Normalize pdf in given bound. Default False * The rest of keyword argument will be pass to pyplot.plot **Returns** x, y of what's being plot """ edges = np.linspace(bound[0], bound[1], bins) return draw_pdf_with_edges(f, arg, edges, ax=ax, scale=scale, density=density, normed_pdf=normed_pdf, **kwds)
[ "def", "draw_pdf", "(", "f", ",", "arg", ",", "bound", ",", "bins", "=", "100", ",", "scale", "=", "1.0", ",", "density", "=", "True", ",", "normed_pdf", "=", "False", ",", "ax", "=", "None", ",", "**", "kwds", ")", ":", "edges", "=", "np", "."...
draw pdf with given argument and bounds. **Arguments** * **f** your pdf. The first argument is assumed to be independent variable * **arg** argument can be tuple or list * **bound** tuple(xmin,xmax) * **bins** number of bins to plot pdf. Default 100. * **scale** multiply pdf by given number. Default 1.0. * **density** plot density instead of expected count in each bin (pdf*bin width). Default True. * **normed_pdf** Normalize pdf in given bound. Default False * The rest of keyword argument will be pass to pyplot.plot **Returns** x, y of what's being plot
[ "draw", "pdf", "with", "given", "argument", "and", "bounds", "." ]
de3593798ea3877dd2785062bed6877dd9058a02
https://github.com/scikit-hep/probfit/blob/de3593798ea3877dd2785062bed6877dd9058a02/probfit/plotting.py#L519-L550
train
jmcarp/betfair.py
betfair/utils.py
get_chunks
def get_chunks(sequence, chunk_size): """Split sequence into chunks. :param list sequence: :param int chunk_size: """ return [ sequence[idx:idx + chunk_size] for idx in range(0, len(sequence), chunk_size) ]
python
def get_chunks(sequence, chunk_size): """Split sequence into chunks. :param list sequence: :param int chunk_size: """ return [ sequence[idx:idx + chunk_size] for idx in range(0, len(sequence), chunk_size) ]
[ "def", "get_chunks", "(", "sequence", ",", "chunk_size", ")", ":", "return", "[", "sequence", "[", "idx", ":", "idx", "+", "chunk_size", "]", "for", "idx", "in", "range", "(", "0", ",", "len", "(", "sequence", ")", ",", "chunk_size", ")", "]" ]
Split sequence into chunks. :param list sequence: :param int chunk_size:
[ "Split", "sequence", "into", "chunks", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/utils.py#L19-L28
train
jmcarp/betfair.py
betfair/utils.py
get_kwargs
def get_kwargs(kwargs): """Get all keys and values from dictionary where key is not `self`. :param dict kwargs: Input parameters """ return { key: value for key, value in six.iteritems(kwargs) if key != 'self' }
python
def get_kwargs(kwargs): """Get all keys and values from dictionary where key is not `self`. :param dict kwargs: Input parameters """ return { key: value for key, value in six.iteritems(kwargs) if key != 'self' }
[ "def", "get_kwargs", "(", "kwargs", ")", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "kwargs", ")", "if", "key", "!=", "'self'", "}" ]
Get all keys and values from dictionary where key is not `self`. :param dict kwargs: Input parameters
[ "Get", "all", "keys", "and", "values", "from", "dictionary", "where", "key", "is", "not", "self", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/utils.py#L31-L39
train
jmcarp/betfair.py
betfair/utils.py
check_status_code
def check_status_code(response, codes=None): """Check HTTP status code and raise exception if incorrect. :param Response response: HTTP response :param codes: List of accepted codes or callable :raises: ApiError if code invalid """ codes = codes or [httplib.OK] checker = ( codes if callable(codes) else lambda resp: resp.status_code in codes ) if not checker(response): raise exceptions.ApiError(response, response.json())
python
def check_status_code(response, codes=None): """Check HTTP status code and raise exception if incorrect. :param Response response: HTTP response :param codes: List of accepted codes or callable :raises: ApiError if code invalid """ codes = codes or [httplib.OK] checker = ( codes if callable(codes) else lambda resp: resp.status_code in codes ) if not checker(response): raise exceptions.ApiError(response, response.json())
[ "def", "check_status_code", "(", "response", ",", "codes", "=", "None", ")", ":", "codes", "=", "codes", "or", "[", "httplib", ".", "OK", "]", "checker", "=", "(", "codes", "if", "callable", "(", "codes", ")", "else", "lambda", "resp", ":", "resp", "...
Check HTTP status code and raise exception if incorrect. :param Response response: HTTP response :param codes: List of accepted codes or callable :raises: ApiError if code invalid
[ "Check", "HTTP", "status", "code", "and", "raise", "exception", "if", "incorrect", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/utils.py#L42-L56
train
jmcarp/betfair.py
betfair/utils.py
result_or_error
def result_or_error(response): """Get `result` field from Betfair response or raise exception if not found. :param Response response: :raises: ApiError if no results passed """ data = response.json() result = data.get('result') if result is not None: return result raise exceptions.ApiError(response, data)
python
def result_or_error(response): """Get `result` field from Betfair response or raise exception if not found. :param Response response: :raises: ApiError if no results passed """ data = response.json() result = data.get('result') if result is not None: return result raise exceptions.ApiError(response, data)
[ "def", "result_or_error", "(", "response", ")", ":", "data", "=", "response", ".", "json", "(", ")", "result", "=", "data", ".", "get", "(", "'result'", ")", "if", "result", "is", "not", "None", ":", "return", "result", "raise", "exceptions", ".", "Api...
Get `result` field from Betfair response or raise exception if not found. :param Response response: :raises: ApiError if no results passed
[ "Get", "result", "field", "from", "Betfair", "response", "or", "raise", "exception", "if", "not", "found", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/utils.py#L59-L70
train
jmcarp/betfair.py
betfair/utils.py
make_payload
def make_payload(base, method, params): """Build Betfair JSON-RPC payload. :param str base: Betfair base ("Sports" or "Account") :param str method: Betfair endpoint :param dict params: Request parameters """ payload = { 'jsonrpc': '2.0', 'method': '{base}APING/v1.0/{method}'.format(**locals()), 'params': utils.serialize_dict(params), 'id': 1, } return payload
python
def make_payload(base, method, params): """Build Betfair JSON-RPC payload. :param str base: Betfair base ("Sports" or "Account") :param str method: Betfair endpoint :param dict params: Request parameters """ payload = { 'jsonrpc': '2.0', 'method': '{base}APING/v1.0/{method}'.format(**locals()), 'params': utils.serialize_dict(params), 'id': 1, } return payload
[ "def", "make_payload", "(", "base", ",", "method", ",", "params", ")", ":", "payload", "=", "{", "'jsonrpc'", ":", "'2.0'", ",", "'method'", ":", "'{base}APING/v1.0/{method}'", ".", "format", "(", "**", "locals", "(", ")", ")", ",", "'params'", ":", "uti...
Build Betfair JSON-RPC payload. :param str base: Betfair base ("Sports" or "Account") :param str method: Betfair endpoint :param dict params: Request parameters
[ "Build", "Betfair", "JSON", "-", "RPC", "payload", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/utils.py#L100-L113
train
jmcarp/betfair.py
betfair/utils.py
requires_login
def requires_login(func, *args, **kwargs): """Decorator to check that the user is logged in. Raises `BetfairError` if instance variable `session_token` is absent. """ self = args[0] if self.session_token: return func(*args, **kwargs) raise exceptions.NotLoggedIn()
python
def requires_login(func, *args, **kwargs): """Decorator to check that the user is logged in. Raises `BetfairError` if instance variable `session_token` is absent. """ self = args[0] if self.session_token: return func(*args, **kwargs) raise exceptions.NotLoggedIn()
[ "def", "requires_login", "(", "func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "self", "=", "args", "[", "0", "]", "if", "self", ".", "session_token", ":", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "raise", "exceptions", ...
Decorator to check that the user is logged in. Raises `BetfairError` if instance variable `session_token` is absent.
[ "Decorator", "to", "check", "that", "the", "user", "is", "logged", "in", ".", "Raises", "BetfairError", "if", "instance", "variable", "session_token", "is", "absent", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/utils.py#L117-L124
train
jmcarp/betfair.py
betfair/price.py
nearest_price
def nearest_price(price, cutoffs=CUTOFFS): """Returns the nearest Betfair odds value to price. Adapted from Anton Zemlyanov's AlgoTrader project (MIT licensed). https://github.com/AlgoTrader/betfair-sports-api/blob/master/lib/betfair_price.js :param float price: Approximate Betfair price (i.e. decimal odds value) :param tuple cutoffs: Optional tuple of (cutoff, step) pairs :returns: The nearest Befair price :rtype: float """ if price <= MIN_PRICE: return MIN_PRICE if price > MAX_PRICE: return MAX_PRICE price = as_dec(price) for cutoff, step in cutoffs: if price < cutoff: break step = as_dec(step) return float((price * step).quantize(2, ROUND_HALF_UP) / step)
python
def nearest_price(price, cutoffs=CUTOFFS): """Returns the nearest Betfair odds value to price. Adapted from Anton Zemlyanov's AlgoTrader project (MIT licensed). https://github.com/AlgoTrader/betfair-sports-api/blob/master/lib/betfair_price.js :param float price: Approximate Betfair price (i.e. decimal odds value) :param tuple cutoffs: Optional tuple of (cutoff, step) pairs :returns: The nearest Befair price :rtype: float """ if price <= MIN_PRICE: return MIN_PRICE if price > MAX_PRICE: return MAX_PRICE price = as_dec(price) for cutoff, step in cutoffs: if price < cutoff: break step = as_dec(step) return float((price * step).quantize(2, ROUND_HALF_UP) / step)
[ "def", "nearest_price", "(", "price", ",", "cutoffs", "=", "CUTOFFS", ")", ":", "if", "price", "<=", "MIN_PRICE", ":", "return", "MIN_PRICE", "if", "price", ">", "MAX_PRICE", ":", "return", "MAX_PRICE", "price", "=", "as_dec", "(", "price", ")", "for", "...
Returns the nearest Betfair odds value to price. Adapted from Anton Zemlyanov's AlgoTrader project (MIT licensed). https://github.com/AlgoTrader/betfair-sports-api/blob/master/lib/betfair_price.js :param float price: Approximate Betfair price (i.e. decimal odds value) :param tuple cutoffs: Optional tuple of (cutoff, step) pairs :returns: The nearest Befair price :rtype: float
[ "Returns", "the", "nearest", "Betfair", "odds", "value", "to", "price", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/price.py#L49-L70
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.login
def login(self, username, password): """Log in to Betfair. Sets `session_token` if successful. :param str username: Username :param str password: Password :raises: BetfairLoginError """ response = self.session.post( os.path.join(self.identity_url, 'certlogin'), cert=self.cert_file, data=urllib.urlencode({ 'username': username, 'password': password, }), headers={ 'X-Application': self.app_key, 'Content-Type': 'application/x-www-form-urlencoded', }, timeout=self.timeout, ) utils.check_status_code(response, [httplib.OK]) data = response.json() if data.get('loginStatus') != 'SUCCESS': raise exceptions.LoginError(response, data) self.session_token = data['sessionToken']
python
def login(self, username, password): """Log in to Betfair. Sets `session_token` if successful. :param str username: Username :param str password: Password :raises: BetfairLoginError """ response = self.session.post( os.path.join(self.identity_url, 'certlogin'), cert=self.cert_file, data=urllib.urlencode({ 'username': username, 'password': password, }), headers={ 'X-Application': self.app_key, 'Content-Type': 'application/x-www-form-urlencoded', }, timeout=self.timeout, ) utils.check_status_code(response, [httplib.OK]) data = response.json() if data.get('loginStatus') != 'SUCCESS': raise exceptions.LoginError(response, data) self.session_token = data['sessionToken']
[ "def", "login", "(", "self", ",", "username", ",", "password", ")", ":", "response", "=", "self", ".", "session", ".", "post", "(", "os", ".", "path", ".", "join", "(", "self", ".", "identity_url", ",", "'certlogin'", ")", ",", "cert", "=", "self", ...
Log in to Betfair. Sets `session_token` if successful. :param str username: Username :param str password: Password :raises: BetfairLoginError
[ "Log", "in", "to", "Betfair", ".", "Sets", "session_token", "if", "successful", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L93-L117
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.list_market_profit_and_loss
def list_market_profit_and_loss( self, market_ids, include_settled_bets=False, include_bsp_bets=None, net_of_commission=None): """Retrieve profit and loss for a given list of markets. :param list market_ids: List of markets to calculate profit and loss :param bool include_settled_bets: Option to include settled bets :param bool include_bsp_bets: Option to include BSP bets :param bool net_of_commission: Option to return profit and loss net of users current commission rate for this market including any special tariffs """ return self.make_api_request( 'Sports', 'listMarketProfitAndLoss', utils.get_kwargs(locals()), model=models.MarketProfitAndLoss, )
python
def list_market_profit_and_loss( self, market_ids, include_settled_bets=False, include_bsp_bets=None, net_of_commission=None): """Retrieve profit and loss for a given list of markets. :param list market_ids: List of markets to calculate profit and loss :param bool include_settled_bets: Option to include settled bets :param bool include_bsp_bets: Option to include BSP bets :param bool net_of_commission: Option to return profit and loss net of users current commission rate for this market including any special tariffs """ return self.make_api_request( 'Sports', 'listMarketProfitAndLoss', utils.get_kwargs(locals()), model=models.MarketProfitAndLoss, )
[ "def", "list_market_profit_and_loss", "(", "self", ",", "market_ids", ",", "include_settled_bets", "=", "False", ",", "include_bsp_bets", "=", "None", ",", "net_of_commission", "=", "None", ")", ":", "return", "self", ".", "make_api_request", "(", "'Sports'", ",",...
Retrieve profit and loss for a given list of markets. :param list market_ids: List of markets to calculate profit and loss :param bool include_settled_bets: Option to include settled bets :param bool include_bsp_bets: Option to include BSP bets :param bool net_of_commission: Option to return profit and loss net of users current commission rate for this market including any special tariffs
[ "Retrieve", "profit", "and", "loss", "for", "a", "given", "list", "of", "markets", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L284-L301
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.iter_list_market_book
def iter_list_market_book(self, market_ids, chunk_size, **kwargs): """Split call to `list_market_book` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_book` """ return itertools.chain(*( self.list_market_book(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
python
def iter_list_market_book(self, market_ids, chunk_size, **kwargs): """Split call to `list_market_book` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_book` """ return itertools.chain(*( self.list_market_book(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
[ "def", "iter_list_market_book", "(", "self", ",", "market_ids", ",", "chunk_size", ",", "**", "kwargs", ")", ":", "return", "itertools", ".", "chain", "(", "*", "(", "self", ".", "list_market_book", "(", "market_chunk", ",", "**", "kwargs", ")", "for", "ma...
Split call to `list_market_book` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_book`
[ "Split", "call", "to", "list_market_book", "into", "separate", "requests", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L305-L315
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.iter_list_market_profit_and_loss
def iter_list_market_profit_and_loss( self, market_ids, chunk_size, **kwargs): """Split call to `list_market_profit_and_loss` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_profit_and_loss` """ return itertools.chain(*( self.list_market_profit_and_loss(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
python
def iter_list_market_profit_and_loss( self, market_ids, chunk_size, **kwargs): """Split call to `list_market_profit_and_loss` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_profit_and_loss` """ return itertools.chain(*( self.list_market_profit_and_loss(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
[ "def", "iter_list_market_profit_and_loss", "(", "self", ",", "market_ids", ",", "chunk_size", ",", "**", "kwargs", ")", ":", "return", "itertools", ".", "chain", "(", "*", "(", "self", ".", "list_market_profit_and_loss", "(", "market_chunk", ",", "**", "kwargs",...
Split call to `list_market_profit_and_loss` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_profit_and_loss`
[ "Split", "call", "to", "list_market_profit_and_loss", "into", "separate", "requests", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L317-L328
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.place_orders
def place_orders(self, market_id, instructions, customer_ref=None): """Place new orders into market. This operation is atomic in that all orders will be placed or none will be placed. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `PlaceInstruction` objects :param str customer_ref: Optional order identifier string """ return self.make_api_request( 'Sports', 'placeOrders', utils.get_kwargs(locals()), model=models.PlaceExecutionReport, )
python
def place_orders(self, market_id, instructions, customer_ref=None): """Place new orders into market. This operation is atomic in that all orders will be placed or none will be placed. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `PlaceInstruction` objects :param str customer_ref: Optional order identifier string """ return self.make_api_request( 'Sports', 'placeOrders', utils.get_kwargs(locals()), model=models.PlaceExecutionReport, )
[ "def", "place_orders", "(", "self", ",", "market_id", ",", "instructions", ",", "customer_ref", "=", "None", ")", ":", "return", "self", ".", "make_api_request", "(", "'Sports'", ",", "'placeOrders'", ",", "utils", ".", "get_kwargs", "(", "locals", "(", ")",...
Place new orders into market. This operation is atomic in that all orders will be placed or none will be placed. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `PlaceInstruction` objects :param str customer_ref: Optional order identifier string
[ "Place", "new", "orders", "into", "market", ".", "This", "operation", "is", "atomic", "in", "that", "all", "orders", "will", "be", "placed", "or", "none", "will", "be", "placed", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L384-L397
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.update_orders
def update_orders(self, market_id, instructions, customer_ref=None): """Update non-exposure changing fields. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `UpdateInstruction` objects :param str customer_ref: Optional order identifier string """ return self.make_api_request( 'Sports', 'updateOrders', utils.get_kwargs(locals()), model=models.UpdateExecutionReport, )
python
def update_orders(self, market_id, instructions, customer_ref=None): """Update non-exposure changing fields. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `UpdateInstruction` objects :param str customer_ref: Optional order identifier string """ return self.make_api_request( 'Sports', 'updateOrders', utils.get_kwargs(locals()), model=models.UpdateExecutionReport, )
[ "def", "update_orders", "(", "self", ",", "market_id", ",", "instructions", ",", "customer_ref", "=", "None", ")", ":", "return", "self", ".", "make_api_request", "(", "'Sports'", ",", "'updateOrders'", ",", "utils", ".", "get_kwargs", "(", "locals", "(", ")...
Update non-exposure changing fields. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `UpdateInstruction` objects :param str customer_ref: Optional order identifier string
[ "Update", "non", "-", "exposure", "changing", "fields", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L432-L444
train
jmcarp/betfair.py
betfair/betfair.py
Betfair.transfer_funds
def transfer_funds(self, from_, to, amount): """Transfer funds between the UK Exchange and Australian Exchange wallets. :param Wallet from_: Source wallet :param Wallet to: Destination wallet :param float amount: Amount to transfer """ return self.make_api_request( 'Account', 'transferFunds', utils.get_kwargs(locals()), model=models.TransferResponse, )
python
def transfer_funds(self, from_, to, amount): """Transfer funds between the UK Exchange and Australian Exchange wallets. :param Wallet from_: Source wallet :param Wallet to: Destination wallet :param float amount: Amount to transfer """ return self.make_api_request( 'Account', 'transferFunds', utils.get_kwargs(locals()), model=models.TransferResponse, )
[ "def", "transfer_funds", "(", "self", ",", "from_", ",", "to", ",", "amount", ")", ":", "return", "self", ".", "make_api_request", "(", "'Account'", ",", "'transferFunds'", ",", "utils", ".", "get_kwargs", "(", "locals", "(", ")", ")", ",", "model", "=",...
Transfer funds between the UK Exchange and Australian Exchange wallets. :param Wallet from_: Source wallet :param Wallet to: Destination wallet :param float amount: Amount to transfer
[ "Transfer", "funds", "between", "the", "UK", "Exchange", "and", "Australian", "Exchange", "wallets", "." ]
116df2fdc512575d1b4c4f1749d4a5bf98e519ff
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L505-L517
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser.parse
def parse(self, text, html=True): '''Parse the text and return a ParseResult instance.''' self._urls = [] self._users = [] self._lists = [] self._tags = [] reply = REPLY_REGEX.match(text) reply = reply.groups(0)[0] if reply is not None else None parsed_html = self._html(text) if html else self._text(text) return ParseResult(self._urls, self._users, reply, self._lists, self._tags, parsed_html)
python
def parse(self, text, html=True): '''Parse the text and return a ParseResult instance.''' self._urls = [] self._users = [] self._lists = [] self._tags = [] reply = REPLY_REGEX.match(text) reply = reply.groups(0)[0] if reply is not None else None parsed_html = self._html(text) if html else self._text(text) return ParseResult(self._urls, self._users, reply, self._lists, self._tags, parsed_html)
[ "def", "parse", "(", "self", ",", "text", ",", "html", "=", "True", ")", ":", "self", ".", "_urls", "=", "[", "]", "self", ".", "_users", "=", "[", "]", "self", ".", "_lists", "=", "[", "]", "self", ".", "_tags", "=", "[", "]", "reply", "=", ...
Parse the text and return a ParseResult instance.
[ "Parse", "the", "text", "and", "return", "a", "ParseResult", "instance", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L125-L137
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._text
def _text(self, text): '''Parse a Tweet without generating HTML.''' URL_REGEX.sub(self._parse_urls, text) USERNAME_REGEX.sub(self._parse_users, text) LIST_REGEX.sub(self._parse_lists, text) HASHTAG_REGEX.sub(self._parse_tags, text) return None
python
def _text(self, text): '''Parse a Tweet without generating HTML.''' URL_REGEX.sub(self._parse_urls, text) USERNAME_REGEX.sub(self._parse_users, text) LIST_REGEX.sub(self._parse_lists, text) HASHTAG_REGEX.sub(self._parse_tags, text) return None
[ "def", "_text", "(", "self", ",", "text", ")", ":", "URL_REGEX", ".", "sub", "(", "self", ".", "_parse_urls", ",", "text", ")", "USERNAME_REGEX", ".", "sub", "(", "self", ".", "_parse_users", ",", "text", ")", "LIST_REGEX", ".", "sub", "(", "self", "...
Parse a Tweet without generating HTML.
[ "Parse", "a", "Tweet", "without", "generating", "HTML", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L139-L145
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._html
def _html(self, text): '''Parse a Tweet and generate HTML.''' html = URL_REGEX.sub(self._parse_urls, text) html = USERNAME_REGEX.sub(self._parse_users, html) html = LIST_REGEX.sub(self._parse_lists, html) return HASHTAG_REGEX.sub(self._parse_tags, html)
python
def _html(self, text): '''Parse a Tweet and generate HTML.''' html = URL_REGEX.sub(self._parse_urls, text) html = USERNAME_REGEX.sub(self._parse_users, html) html = LIST_REGEX.sub(self._parse_lists, html) return HASHTAG_REGEX.sub(self._parse_tags, html)
[ "def", "_html", "(", "self", ",", "text", ")", ":", "html", "=", "URL_REGEX", ".", "sub", "(", "self", ".", "_parse_urls", ",", "text", ")", "html", "=", "USERNAME_REGEX", ".", "sub", "(", "self", ".", "_parse_users", ",", "html", ")", "html", "=", ...
Parse a Tweet and generate HTML.
[ "Parse", "a", "Tweet", "and", "generate", "HTML", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L147-L152
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._parse_urls
def _parse_urls(self, match): '''Parse URLs.''' mat = match.group(0) # Fix a bug in the regex concerning www...com and www.-foo.com domains # TODO fix this in the regex instead of working around it here domain = match.group(5) if domain[0] in '.-': return mat # Only allow IANA one letter domains that are actually registered if len(domain) == 5 \ and domain[-4:].lower() in ('.com', '.org', '.net') \ and not domain.lower() in IANA_ONE_LETTER_DOMAINS: return mat # Check for urls without http(s) pos = mat.find('http') if pos != -1: pre, url = mat[:pos], mat[pos:] full_url = url # Find the www and force https:// else: pos = mat.lower().find('www') pre, url = mat[:pos], mat[pos:] full_url = 'https://%s' % url if self._include_spans: span = match.span(0) # add an offset if pre is e.g. ' ' span = (span[0] + len(pre), span[1]) self._urls.append((url, span)) else: self._urls.append(url) if self._html: return '%s%s' % (pre, self.format_url(full_url, self._shorten_url(escape(url))))
python
def _parse_urls(self, match): '''Parse URLs.''' mat = match.group(0) # Fix a bug in the regex concerning www...com and www.-foo.com domains # TODO fix this in the regex instead of working around it here domain = match.group(5) if domain[0] in '.-': return mat # Only allow IANA one letter domains that are actually registered if len(domain) == 5 \ and domain[-4:].lower() in ('.com', '.org', '.net') \ and not domain.lower() in IANA_ONE_LETTER_DOMAINS: return mat # Check for urls without http(s) pos = mat.find('http') if pos != -1: pre, url = mat[:pos], mat[pos:] full_url = url # Find the www and force https:// else: pos = mat.lower().find('www') pre, url = mat[:pos], mat[pos:] full_url = 'https://%s' % url if self._include_spans: span = match.span(0) # add an offset if pre is e.g. ' ' span = (span[0] + len(pre), span[1]) self._urls.append((url, span)) else: self._urls.append(url) if self._html: return '%s%s' % (pre, self.format_url(full_url, self._shorten_url(escape(url))))
[ "def", "_parse_urls", "(", "self", ",", "match", ")", ":", "mat", "=", "match", ".", "group", "(", "0", ")", "domain", "=", "match", ".", "group", "(", "5", ")", "if", "domain", "[", "0", "]", "in", "'.-'", ":", "return", "mat", "if", "len", "(...
Parse URLs.
[ "Parse", "URLs", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L155-L195
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._parse_users
def _parse_users(self, match): '''Parse usernames.''' # Don't parse lists here if match.group(2) is not None: return match.group(0) mat = match.group(0) if self._include_spans: self._users.append((mat[1:], match.span(0))) else: self._users.append(mat[1:]) if self._html: return self.format_username(mat[0:1], mat[1:])
python
def _parse_users(self, match): '''Parse usernames.''' # Don't parse lists here if match.group(2) is not None: return match.group(0) mat = match.group(0) if self._include_spans: self._users.append((mat[1:], match.span(0))) else: self._users.append(mat[1:]) if self._html: return self.format_username(mat[0:1], mat[1:])
[ "def", "_parse_users", "(", "self", ",", "match", ")", ":", "if", "match", ".", "group", "(", "2", ")", "is", "not", "None", ":", "return", "match", ".", "group", "(", "0", ")", "mat", "=", "match", ".", "group", "(", "0", ")", "if", "self", "....
Parse usernames.
[ "Parse", "usernames", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L197-L211
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._parse_lists
def _parse_lists(self, match): '''Parse lists.''' # Don't parse usernames here if match.group(4) is None: return match.group(0) pre, at_char, user, list_name = match.groups() list_name = list_name[1:] if self._include_spans: self._lists.append((user, list_name, match.span(0))) else: self._lists.append((user, list_name)) if self._html: return '%s%s' % (pre, self.format_list(at_char, user, list_name))
python
def _parse_lists(self, match): '''Parse lists.''' # Don't parse usernames here if match.group(4) is None: return match.group(0) pre, at_char, user, list_name = match.groups() list_name = list_name[1:] if self._include_spans: self._lists.append((user, list_name, match.span(0))) else: self._lists.append((user, list_name)) if self._html: return '%s%s' % (pre, self.format_list(at_char, user, list_name))
[ "def", "_parse_lists", "(", "self", ",", "match", ")", ":", "if", "match", ".", "group", "(", "4", ")", "is", "None", ":", "return", "match", ".", "group", "(", "0", ")", "pre", ",", "at_char", ",", "user", ",", "list_name", "=", "match", ".", "g...
Parse lists.
[ "Parse", "lists", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L213-L228
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._parse_tags
def _parse_tags(self, match): '''Parse hashtags.''' mat = match.group(0) # Fix problems with the regex capturing stuff infront of the # tag = None for i in '#\uff03': pos = mat.rfind(i) if pos != -1: tag = i break pre, text = mat[:pos], mat[pos + 1:] if self._include_spans: span = match.span(0) # add an offset if pre is e.g. ' ' span = (span[0] + len(pre), span[1]) self._tags.append((text, span)) else: self._tags.append(text) if self._html: return '%s%s' % (pre, self.format_tag(tag, text))
python
def _parse_tags(self, match): '''Parse hashtags.''' mat = match.group(0) # Fix problems with the regex capturing stuff infront of the # tag = None for i in '#\uff03': pos = mat.rfind(i) if pos != -1: tag = i break pre, text = mat[:pos], mat[pos + 1:] if self._include_spans: span = match.span(0) # add an offset if pre is e.g. ' ' span = (span[0] + len(pre), span[1]) self._tags.append((text, span)) else: self._tags.append(text) if self._html: return '%s%s' % (pre, self.format_tag(tag, text))
[ "def", "_parse_tags", "(", "self", ",", "match", ")", ":", "mat", "=", "match", ".", "group", "(", "0", ")", "tag", "=", "None", "for", "i", "in", "'#\\uff03'", ":", "pos", "=", "mat", ".", "rfind", "(", "i", ")", "if", "pos", "!=", "-", "1", ...
Parse hashtags.
[ "Parse", "hashtags", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L230-L253
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser._shorten_url
def _shorten_url(self, text): '''Shorten a URL and make sure to not cut of html entities.''' if len(text) > self._max_url_length and self._max_url_length != -1: text = text[0:self._max_url_length - 3] amp = text.rfind('&') close = text.rfind(';') if amp != -1 and (close == -1 or close < amp): text = text[0:amp] return text + '...' else: return text
python
def _shorten_url(self, text): '''Shorten a URL and make sure to not cut of html entities.''' if len(text) > self._max_url_length and self._max_url_length != -1: text = text[0:self._max_url_length - 3] amp = text.rfind('&') close = text.rfind(';') if amp != -1 and (close == -1 or close < amp): text = text[0:amp] return text + '...' else: return text
[ "def", "_shorten_url", "(", "self", ",", "text", ")", ":", "if", "len", "(", "text", ")", ">", "self", ".", "_max_url_length", "and", "self", ".", "_max_url_length", "!=", "-", "1", ":", "text", "=", "text", "[", "0", ":", "self", ".", "_max_url_leng...
Shorten a URL and make sure to not cut of html entities.
[ "Shorten", "a", "URL", "and", "make", "sure", "to", "not", "cut", "of", "html", "entities", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L255-L268
train
edmondburnett/twitter-text-python
ttp/ttp.py
Parser.format_list
def format_list(self, at_char, user, list_name): '''Return formatted HTML for a list.''' return '<a href="https://twitter.com/%s/lists/%s">%s%s/%s</a>' \ % (user, list_name, at_char, user, list_name)
python
def format_list(self, at_char, user, list_name): '''Return formatted HTML for a list.''' return '<a href="https://twitter.com/%s/lists/%s">%s%s/%s</a>' \ % (user, list_name, at_char, user, list_name)
[ "def", "format_list", "(", "self", ",", "at_char", ",", "user", ",", "list_name", ")", ":", "return", "'<a href=\"https://twitter.com/%s/lists/%s\">%s%s/%s</a>'", "%", "(", "user", ",", "list_name", ",", "at_char", ",", "user", ",", "list_name", ")" ]
Return formatted HTML for a list.
[ "Return", "formatted", "HTML", "for", "a", "list", "." ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L281-L284
train
edmondburnett/twitter-text-python
ttp/utils.py
follow_shortlinks
def follow_shortlinks(shortlinks): """Follow redirects in list of shortlinks, return dict of resulting URLs""" links_followed = {} for shortlink in shortlinks: url = shortlink request_result = requests.get(url) redirect_history = request_result.history # history might look like: # (<Response [301]>, <Response [301]>) # where each response object has a URL all_urls = [] for redirect in redirect_history: all_urls.append(redirect.url) # append the final URL that we finish with all_urls.append(request_result.url) links_followed[shortlink] = all_urls return links_followed
python
def follow_shortlinks(shortlinks): """Follow redirects in list of shortlinks, return dict of resulting URLs""" links_followed = {} for shortlink in shortlinks: url = shortlink request_result = requests.get(url) redirect_history = request_result.history # history might look like: # (<Response [301]>, <Response [301]>) # where each response object has a URL all_urls = [] for redirect in redirect_history: all_urls.append(redirect.url) # append the final URL that we finish with all_urls.append(request_result.url) links_followed[shortlink] = all_urls return links_followed
[ "def", "follow_shortlinks", "(", "shortlinks", ")", ":", "links_followed", "=", "{", "}", "for", "shortlink", "in", "shortlinks", ":", "url", "=", "shortlink", "request_result", "=", "requests", ".", "get", "(", "url", ")", "redirect_history", "=", "request_re...
Follow redirects in list of shortlinks, return dict of resulting URLs
[ "Follow", "redirects", "in", "list", "of", "shortlinks", "return", "dict", "of", "resulting", "URLs" ]
2a23ced35bfd34c4bc4b7148afd85771e9eb8669
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/utils.py#L8-L24
train
cloudendpoints/endpoints-python
endpoints/resource_container.py
_GetFieldAttributes
def _GetFieldAttributes(field): """Decomposes field into the needed arguments to pass to the constructor. This can be used to create copies of the field or to compare if two fields are "equal" (since __eq__ is not implemented on messages.Field). Args: field: A ProtoRPC message field (potentially to be copied). Raises: TypeError: If the field is not an instance of messages.Field. Returns: A pair of relevant arguments to be passed to the constructor for the field type. The first element is a list of positional arguments for the constructor and the second is a dictionary of keyword arguments. """ if not isinstance(field, messages.Field): raise TypeError('Field %r to be copied not a ProtoRPC field.' % (field,)) positional_args = [] kwargs = { 'required': field.required, 'repeated': field.repeated, 'variant': field.variant, 'default': field._Field__default, # pylint: disable=protected-access } if isinstance(field, messages.MessageField): # Message fields can't have a default kwargs.pop('default') if not isinstance(field, message_types.DateTimeField): positional_args.insert(0, field.message_type) elif isinstance(field, messages.EnumField): positional_args.insert(0, field.type) return positional_args, kwargs
python
def _GetFieldAttributes(field): """Decomposes field into the needed arguments to pass to the constructor. This can be used to create copies of the field or to compare if two fields are "equal" (since __eq__ is not implemented on messages.Field). Args: field: A ProtoRPC message field (potentially to be copied). Raises: TypeError: If the field is not an instance of messages.Field. Returns: A pair of relevant arguments to be passed to the constructor for the field type. The first element is a list of positional arguments for the constructor and the second is a dictionary of keyword arguments. """ if not isinstance(field, messages.Field): raise TypeError('Field %r to be copied not a ProtoRPC field.' % (field,)) positional_args = [] kwargs = { 'required': field.required, 'repeated': field.repeated, 'variant': field.variant, 'default': field._Field__default, # pylint: disable=protected-access } if isinstance(field, messages.MessageField): # Message fields can't have a default kwargs.pop('default') if not isinstance(field, message_types.DateTimeField): positional_args.insert(0, field.message_type) elif isinstance(field, messages.EnumField): positional_args.insert(0, field.type) return positional_args, kwargs
[ "def", "_GetFieldAttributes", "(", "field", ")", ":", "if", "not", "isinstance", "(", "field", ",", "messages", ".", "Field", ")", ":", "raise", "TypeError", "(", "'Field %r to be copied not a ProtoRPC field.'", "%", "(", "field", ",", ")", ")", "positional_args...
Decomposes field into the needed arguments to pass to the constructor. This can be used to create copies of the field or to compare if two fields are "equal" (since __eq__ is not implemented on messages.Field). Args: field: A ProtoRPC message field (potentially to be copied). Raises: TypeError: If the field is not an instance of messages.Field. Returns: A pair of relevant arguments to be passed to the constructor for the field type. The first element is a list of positional arguments for the constructor and the second is a dictionary of keyword arguments.
[ "Decomposes", "field", "into", "the", "needed", "arguments", "to", "pass", "to", "the", "constructor", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/resource_container.py#L142-L178
train
cloudendpoints/endpoints-python
endpoints/resource_container.py
_CompareFields
def _CompareFields(field, other_field): """Checks if two ProtoRPC fields are "equal". Compares the arguments, rather than the id of the elements (which is the default __eq__ behavior) as well as the class of the fields. Args: field: A ProtoRPC message field to be compared. other_field: A ProtoRPC message field to be compared. Returns: Boolean indicating whether the fields are equal. """ field_attrs = _GetFieldAttributes(field) other_field_attrs = _GetFieldAttributes(other_field) if field_attrs != other_field_attrs: return False return field.__class__ == other_field.__class__
python
def _CompareFields(field, other_field): """Checks if two ProtoRPC fields are "equal". Compares the arguments, rather than the id of the elements (which is the default __eq__ behavior) as well as the class of the fields. Args: field: A ProtoRPC message field to be compared. other_field: A ProtoRPC message field to be compared. Returns: Boolean indicating whether the fields are equal. """ field_attrs = _GetFieldAttributes(field) other_field_attrs = _GetFieldAttributes(other_field) if field_attrs != other_field_attrs: return False return field.__class__ == other_field.__class__
[ "def", "_CompareFields", "(", "field", ",", "other_field", ")", ":", "field_attrs", "=", "_GetFieldAttributes", "(", "field", ")", "other_field_attrs", "=", "_GetFieldAttributes", "(", "other_field", ")", "if", "field_attrs", "!=", "other_field_attrs", ":", "return"...
Checks if two ProtoRPC fields are "equal". Compares the arguments, rather than the id of the elements (which is the default __eq__ behavior) as well as the class of the fields. Args: field: A ProtoRPC message field to be compared. other_field: A ProtoRPC message field to be compared. Returns: Boolean indicating whether the fields are equal.
[ "Checks", "if", "two", "ProtoRPC", "fields", "are", "equal", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/resource_container.py#L181-L198
train
cloudendpoints/endpoints-python
endpoints/resource_container.py
ResourceContainer.combined_message_class
def combined_message_class(self): """A ProtoRPC message class with both request and parameters fields. Caches the result in a local private variable. Uses _CopyField to create copies of the fields from the existing request and parameters classes since those fields are "owned" by the message classes. Raises: TypeError: If a field name is used in both the request message and the parameters but the two fields do not represent the same type. Returns: Value of combined message class for this property. """ if self.__combined_message_class is not None: return self.__combined_message_class fields = {} # We don't need to preserve field.number since this combined class is only # used for the protorpc remote.method and is not needed for the API config. # The only place field.number matters is in parameterOrder, but this is set # based on container.parameters_message_class which will use the field # numbers originally passed in. # Counter for fields. field_number = 1 for field in self.body_message_class.all_fields(): fields[field.name] = _CopyField(field, number=field_number) field_number += 1 for field in self.parameters_message_class.all_fields(): if field.name in fields: if not _CompareFields(field, fields[field.name]): raise TypeError('Field %r contained in both parameters and request ' 'body, but the fields differ.' % (field.name,)) else: # Skip a field that's already there. continue fields[field.name] = _CopyField(field, number=field_number) field_number += 1 self.__combined_message_class = type('CombinedContainer', (messages.Message,), fields) return self.__combined_message_class
python
def combined_message_class(self): """A ProtoRPC message class with both request and parameters fields. Caches the result in a local private variable. Uses _CopyField to create copies of the fields from the existing request and parameters classes since those fields are "owned" by the message classes. Raises: TypeError: If a field name is used in both the request message and the parameters but the two fields do not represent the same type. Returns: Value of combined message class for this property. """ if self.__combined_message_class is not None: return self.__combined_message_class fields = {} # We don't need to preserve field.number since this combined class is only # used for the protorpc remote.method and is not needed for the API config. # The only place field.number matters is in parameterOrder, but this is set # based on container.parameters_message_class which will use the field # numbers originally passed in. # Counter for fields. field_number = 1 for field in self.body_message_class.all_fields(): fields[field.name] = _CopyField(field, number=field_number) field_number += 1 for field in self.parameters_message_class.all_fields(): if field.name in fields: if not _CompareFields(field, fields[field.name]): raise TypeError('Field %r contained in both parameters and request ' 'body, but the fields differ.' % (field.name,)) else: # Skip a field that's already there. continue fields[field.name] = _CopyField(field, number=field_number) field_number += 1 self.__combined_message_class = type('CombinedContainer', (messages.Message,), fields) return self.__combined_message_class
[ "def", "combined_message_class", "(", "self", ")", ":", "if", "self", ".", "__combined_message_class", "is", "not", "None", ":", "return", "self", ".", "__combined_message_class", "fields", "=", "{", "}", "field_number", "=", "1", "for", "field", "in", "self",...
A ProtoRPC message class with both request and parameters fields. Caches the result in a local private variable. Uses _CopyField to create copies of the fields from the existing request and parameters classes since those fields are "owned" by the message classes. Raises: TypeError: If a field name is used in both the request message and the parameters but the two fields do not represent the same type. Returns: Value of combined message class for this property.
[ "A", "ProtoRPC", "message", "class", "with", "both", "request", "and", "parameters", "fields", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/resource_container.py#L58-L100
train
cloudendpoints/endpoints-python
endpoints/resource_container.py
ResourceContainer.add_to_cache
def add_to_cache(cls, remote_info, container): # pylint: disable=g-bad-name """Adds a ResourceContainer to a cache tying it to a protorpc method. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. container: An instance of ResourceContainer. Raises: TypeError: if the container is not an instance of cls. KeyError: if the remote method has been reference by a container before. This created remote method should never occur because a remote method is created once. """ if not isinstance(container, cls): raise TypeError('%r not an instance of %r, could not be added to cache.' % (container, cls)) if remote_info in cls.__remote_info_cache: raise KeyError('Cache has collision but should not.') cls.__remote_info_cache[remote_info] = container
python
def add_to_cache(cls, remote_info, container): # pylint: disable=g-bad-name """Adds a ResourceContainer to a cache tying it to a protorpc method. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. container: An instance of ResourceContainer. Raises: TypeError: if the container is not an instance of cls. KeyError: if the remote method has been reference by a container before. This created remote method should never occur because a remote method is created once. """ if not isinstance(container, cls): raise TypeError('%r not an instance of %r, could not be added to cache.' % (container, cls)) if remote_info in cls.__remote_info_cache: raise KeyError('Cache has collision but should not.') cls.__remote_info_cache[remote_info] = container
[ "def", "add_to_cache", "(", "cls", ",", "remote_info", ",", "container", ")", ":", "if", "not", "isinstance", "(", "container", ",", "cls", ")", ":", "raise", "TypeError", "(", "'%r not an instance of %r, could not be added to cache.'", "%", "(", "container", ",",...
Adds a ResourceContainer to a cache tying it to a protorpc method. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. container: An instance of ResourceContainer. Raises: TypeError: if the container is not an instance of cls. KeyError: if the remote method has been reference by a container before. This created remote method should never occur because a remote method is created once.
[ "Adds", "a", "ResourceContainer", "to", "a", "cache", "tying", "it", "to", "a", "protorpc", "method", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/resource_container.py#L103-L122
train
cloudendpoints/endpoints-python
endpoints/resource_container.py
ResourceContainer.get_request_message
def get_request_message(cls, remote_info): # pylint: disable=g-bad-name """Gets request message or container from remote info. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. Returns: Either an instance of the request type from the remote or the ResourceContainer that was cached with the remote method. """ if remote_info in cls.__remote_info_cache: return cls.__remote_info_cache[remote_info] else: return remote_info.request_type()
python
def get_request_message(cls, remote_info): # pylint: disable=g-bad-name """Gets request message or container from remote info. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. Returns: Either an instance of the request type from the remote or the ResourceContainer that was cached with the remote method. """ if remote_info in cls.__remote_info_cache: return cls.__remote_info_cache[remote_info] else: return remote_info.request_type()
[ "def", "get_request_message", "(", "cls", ",", "remote_info", ")", ":", "if", "remote_info", "in", "cls", ".", "__remote_info_cache", ":", "return", "cls", ".", "__remote_info_cache", "[", "remote_info", "]", "else", ":", "return", "remote_info", ".", "request_t...
Gets request message or container from remote info. Args: remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding to a method. Returns: Either an instance of the request type from the remote or the ResourceContainer that was cached with the remote method.
[ "Gets", "request", "message", "or", "container", "from", "remote", "info", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/resource_container.py#L125-L139
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_is_auth_info_available
def _is_auth_info_available(): """Check if user auth info has been set in environment variables.""" return (_ENDPOINTS_USER_INFO in os.environ or (_ENV_AUTH_EMAIL in os.environ and _ENV_AUTH_DOMAIN in os.environ) or _ENV_USE_OAUTH_SCOPE in os.environ)
python
def _is_auth_info_available(): """Check if user auth info has been set in environment variables.""" return (_ENDPOINTS_USER_INFO in os.environ or (_ENV_AUTH_EMAIL in os.environ and _ENV_AUTH_DOMAIN in os.environ) or _ENV_USE_OAUTH_SCOPE in os.environ)
[ "def", "_is_auth_info_available", "(", ")", ":", "return", "(", "_ENDPOINTS_USER_INFO", "in", "os", ".", "environ", "or", "(", "_ENV_AUTH_EMAIL", "in", "os", ".", "environ", "and", "_ENV_AUTH_DOMAIN", "in", "os", ".", "environ", ")", "or", "_ENV_USE_OAUTH_SCOPE"...
Check if user auth info has been set in environment variables.
[ "Check", "if", "user", "auth", "info", "has", "been", "set", "in", "environment", "variables", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L151-L155
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_get_token
def _get_token( request=None, allowed_auth_schemes=('OAuth', 'Bearer'), allowed_query_keys=('bearer_token', 'access_token')): """Get the auth token for this request. Auth token may be specified in either the Authorization header or as a query param (either access_token or bearer_token). We'll check in this order: 1. Authorization header. 2. bearer_token query param. 3. access_token query param. Args: request: The current request, or None. Returns: The token in the request or None. """ allowed_auth_schemes = _listlike_guard( allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True) # Check if the token is in the Authorization header. auth_header = os.environ.get('HTTP_AUTHORIZATION') if auth_header: for auth_scheme in allowed_auth_schemes: if auth_header.startswith(auth_scheme): return auth_header[len(auth_scheme) + 1:] # If an auth header was specified, even if it's an invalid one, we won't # look for the token anywhere else. return None # Check if the token is in the query string. if request: allowed_query_keys = _listlike_guard( allowed_query_keys, 'allowed_query_keys', iterable_only=True) for key in allowed_query_keys: token, _ = request.get_unrecognized_field_info(key) if token: return token
python
def _get_token( request=None, allowed_auth_schemes=('OAuth', 'Bearer'), allowed_query_keys=('bearer_token', 'access_token')): """Get the auth token for this request. Auth token may be specified in either the Authorization header or as a query param (either access_token or bearer_token). We'll check in this order: 1. Authorization header. 2. bearer_token query param. 3. access_token query param. Args: request: The current request, or None. Returns: The token in the request or None. """ allowed_auth_schemes = _listlike_guard( allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True) # Check if the token is in the Authorization header. auth_header = os.environ.get('HTTP_AUTHORIZATION') if auth_header: for auth_scheme in allowed_auth_schemes: if auth_header.startswith(auth_scheme): return auth_header[len(auth_scheme) + 1:] # If an auth header was specified, even if it's an invalid one, we won't # look for the token anywhere else. return None # Check if the token is in the query string. if request: allowed_query_keys = _listlike_guard( allowed_query_keys, 'allowed_query_keys', iterable_only=True) for key in allowed_query_keys: token, _ = request.get_unrecognized_field_info(key) if token: return token
[ "def", "_get_token", "(", "request", "=", "None", ",", "allowed_auth_schemes", "=", "(", "'OAuth'", ",", "'Bearer'", ")", ",", "allowed_query_keys", "=", "(", "'bearer_token'", ",", "'access_token'", ")", ")", ":", "allowed_auth_schemes", "=", "_listlike_guard", ...
Get the auth token for this request. Auth token may be specified in either the Authorization header or as a query param (either access_token or bearer_token). We'll check in this order: 1. Authorization header. 2. bearer_token query param. 3. access_token query param. Args: request: The current request, or None. Returns: The token in the request or None.
[ "Get", "the", "auth", "token", "for", "this", "request", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L248-L285
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_get_id_token_user
def _get_id_token_user(token, issuers, audiences, allowed_client_ids, time_now, cache): """Get a User for the given id token, if the token is valid. Args: token: The id_token to check. issuers: dict of Issuers audiences: List of audiences that are acceptable. allowed_client_ids: List of client IDs that are acceptable. time_now: The current time as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). Returns: A User if the token is valid, None otherwise. """ # Verify that the token is valid before we try to extract anything from it. # This verifies the signature and some of the basic info in the token. for issuer_key, issuer in issuers.items(): issuer_cert_uri = convert_jwks_uri(issuer.jwks_uri) try: parsed_token = _verify_signed_jwt_with_certs( token, time_now, cache, cert_uri=issuer_cert_uri) except Exception: # pylint: disable=broad-except _logger.debug( 'id_token verification failed for issuer %s', issuer_key, exc_info=True) continue issuer_values = _listlike_guard(issuer.issuer, 'issuer', log_warning=False) if isinstance(audiences, _Mapping): audiences = audiences[issuer_key] if _verify_parsed_token( parsed_token, issuer_values, audiences, allowed_client_ids, # There's some special handling we do for Google issuers. # ESP doesn't do this, and it's both unnecessary and invalid for other issuers. # So we'll turn it off except in the Google issuer case. is_legacy_google_auth=(issuer.issuer == _ISSUERS)): email = parsed_token['email'] # The token might have an id, but it's a Gaia ID that's been # obfuscated with the Focus key, rather than the AppEngine (igoogle) # key. If the developer ever put this email into the user DB # and retrieved the ID from that, it'd be different from the ID we'd # return here, so it's safer to not return the ID. # Instead, we'll only return the email. return users.User(email)
python
def _get_id_token_user(token, issuers, audiences, allowed_client_ids, time_now, cache): """Get a User for the given id token, if the token is valid. Args: token: The id_token to check. issuers: dict of Issuers audiences: List of audiences that are acceptable. allowed_client_ids: List of client IDs that are acceptable. time_now: The current time as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). Returns: A User if the token is valid, None otherwise. """ # Verify that the token is valid before we try to extract anything from it. # This verifies the signature and some of the basic info in the token. for issuer_key, issuer in issuers.items(): issuer_cert_uri = convert_jwks_uri(issuer.jwks_uri) try: parsed_token = _verify_signed_jwt_with_certs( token, time_now, cache, cert_uri=issuer_cert_uri) except Exception: # pylint: disable=broad-except _logger.debug( 'id_token verification failed for issuer %s', issuer_key, exc_info=True) continue issuer_values = _listlike_guard(issuer.issuer, 'issuer', log_warning=False) if isinstance(audiences, _Mapping): audiences = audiences[issuer_key] if _verify_parsed_token( parsed_token, issuer_values, audiences, allowed_client_ids, # There's some special handling we do for Google issuers. # ESP doesn't do this, and it's both unnecessary and invalid for other issuers. # So we'll turn it off except in the Google issuer case. is_legacy_google_auth=(issuer.issuer == _ISSUERS)): email = parsed_token['email'] # The token might have an id, but it's a Gaia ID that's been # obfuscated with the Focus key, rather than the AppEngine (igoogle) # key. If the developer ever put this email into the user DB # and retrieved the ID from that, it'd be different from the ID we'd # return here, so it's safer to not return the ID. # Instead, we'll only return the email. return users.User(email)
[ "def", "_get_id_token_user", "(", "token", ",", "issuers", ",", "audiences", ",", "allowed_client_ids", ",", "time_now", ",", "cache", ")", ":", "for", "issuer_key", ",", "issuer", "in", "issuers", ".", "items", "(", ")", ":", "issuer_cert_uri", "=", "conver...
Get a User for the given id token, if the token is valid. Args: token: The id_token to check. issuers: dict of Issuers audiences: List of audiences that are acceptable. allowed_client_ids: List of client IDs that are acceptable. time_now: The current time as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). Returns: A User if the token is valid, None otherwise.
[ "Get", "a", "User", "for", "the", "given", "id", "token", "if", "the", "token", "is", "valid", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L288-L330
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_process_scopes
def _process_scopes(scopes): """Parse a scopes list into a set of all scopes and a set of sufficient scope sets. scopes: A list of strings, each of which is a space-separated list of scopes. Examples: ['scope1'] ['scope1', 'scope2'] ['scope1', 'scope2 scope3'] Returns: all_scopes: a set of strings, each of which is one scope to check for sufficient_scopes: a set of sets of strings; each inner set is a set of scopes which are sufficient for access. Example: {{'scope1'}, {'scope2', 'scope3'}} """ all_scopes = set() sufficient_scopes = set() for scope_set in scopes: scope_set_scopes = frozenset(scope_set.split()) all_scopes.update(scope_set_scopes) sufficient_scopes.add(scope_set_scopes) return all_scopes, sufficient_scopes
python
def _process_scopes(scopes): """Parse a scopes list into a set of all scopes and a set of sufficient scope sets. scopes: A list of strings, each of which is a space-separated list of scopes. Examples: ['scope1'] ['scope1', 'scope2'] ['scope1', 'scope2 scope3'] Returns: all_scopes: a set of strings, each of which is one scope to check for sufficient_scopes: a set of sets of strings; each inner set is a set of scopes which are sufficient for access. Example: {{'scope1'}, {'scope2', 'scope3'}} """ all_scopes = set() sufficient_scopes = set() for scope_set in scopes: scope_set_scopes = frozenset(scope_set.split()) all_scopes.update(scope_set_scopes) sufficient_scopes.add(scope_set_scopes) return all_scopes, sufficient_scopes
[ "def", "_process_scopes", "(", "scopes", ")", ":", "all_scopes", "=", "set", "(", ")", "sufficient_scopes", "=", "set", "(", ")", "for", "scope_set", "in", "scopes", ":", "scope_set_scopes", "=", "frozenset", "(", "scope_set", ".", "split", "(", ")", ")", ...
Parse a scopes list into a set of all scopes and a set of sufficient scope sets. scopes: A list of strings, each of which is a space-separated list of scopes. Examples: ['scope1'] ['scope1', 'scope2'] ['scope1', 'scope2 scope3'] Returns: all_scopes: a set of strings, each of which is one scope to check for sufficient_scopes: a set of sets of strings; each inner set is a set of scopes which are sufficient for access. Example: {{'scope1'}, {'scope2', 'scope3'}}
[ "Parse", "a", "scopes", "list", "into", "a", "set", "of", "all", "scopes", "and", "a", "set", "of", "sufficient", "scope", "sets", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L342-L362
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_are_scopes_sufficient
def _are_scopes_sufficient(authorized_scopes, sufficient_scopes): """Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes """ for sufficient_scope_set in sufficient_scopes: if sufficient_scope_set.issubset(authorized_scopes): return True return False
python
def _are_scopes_sufficient(authorized_scopes, sufficient_scopes): """Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes """ for sufficient_scope_set in sufficient_scopes: if sufficient_scope_set.issubset(authorized_scopes): return True return False
[ "def", "_are_scopes_sufficient", "(", "authorized_scopes", ",", "sufficient_scopes", ")", ":", "for", "sufficient_scope_set", "in", "sufficient_scopes", ":", "if", "sufficient_scope_set", ".", "issubset", "(", "authorized_scopes", ")", ":", "return", "True", "return", ...
Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes
[ "Check", "if", "a", "list", "of", "authorized", "scopes", "satisfies", "any", "set", "of", "sufficient", "scopes", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L365-L375
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_set_bearer_user_vars
def _set_bearer_user_vars(allowed_client_ids, scopes): """Validate the oauth bearer token and set endpoints auth user variables. If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This provides enough information that our endpoints.get_current_user() function can get the user. Args: allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes. """ all_scopes, sufficient_scopes = _process_scopes(scopes) try: authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes)) except oauth.Error: _logger.debug('Unable to get authorized scopes.', exc_info=True) return if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes): _logger.warning('Authorized scopes did not satisfy scope requirements.') return client_id = oauth.get_client_id(authorized_scopes) # The client ID must be in allowed_client_ids. If allowed_client_ids is # empty, don't allow any client ID. If allowed_client_ids is set to # SKIP_CLIENT_ID_CHECK, all client IDs will be allowed. if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and client_id not in allowed_client_ids): _logger.warning('Client ID is not allowed: %s', client_id) return os.environ[_ENV_USE_OAUTH_SCOPE] = ' '.join(authorized_scopes) _logger.debug('get_current_user() will return user from matched oauth_user.')
python
def _set_bearer_user_vars(allowed_client_ids, scopes): """Validate the oauth bearer token and set endpoints auth user variables. If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This provides enough information that our endpoints.get_current_user() function can get the user. Args: allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes. """ all_scopes, sufficient_scopes = _process_scopes(scopes) try: authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes)) except oauth.Error: _logger.debug('Unable to get authorized scopes.', exc_info=True) return if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes): _logger.warning('Authorized scopes did not satisfy scope requirements.') return client_id = oauth.get_client_id(authorized_scopes) # The client ID must be in allowed_client_ids. If allowed_client_ids is # empty, don't allow any client ID. If allowed_client_ids is set to # SKIP_CLIENT_ID_CHECK, all client IDs will be allowed. if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and client_id not in allowed_client_ids): _logger.warning('Client ID is not allowed: %s', client_id) return os.environ[_ENV_USE_OAUTH_SCOPE] = ' '.join(authorized_scopes) _logger.debug('get_current_user() will return user from matched oauth_user.')
[ "def", "_set_bearer_user_vars", "(", "allowed_client_ids", ",", "scopes", ")", ":", "all_scopes", ",", "sufficient_scopes", "=", "_process_scopes", "(", "scopes", ")", "try", ":", "authorized_scopes", "=", "oauth", ".", "get_authorized_scopes", "(", "sorted", "(", ...
Validate the oauth bearer token and set endpoints auth user variables. If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This provides enough information that our endpoints.get_current_user() function can get the user. Args: allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes.
[ "Validate", "the", "oauth", "bearer", "token", "and", "set", "endpoints", "auth", "user", "variables", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L379-L410
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_set_bearer_user_vars_local
def _set_bearer_user_vars_local(token, allowed_client_ids, scopes): """Validate the oauth bearer token on the dev server. Since the functions in the oauth module return only example results in local development, this hits the tokeninfo endpoint and attempts to validate the token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we can get the user from the token. Args: token: String with the oauth token to validate. allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes. """ # Get token info from the tokeninfo endpoint. result = urlfetch.fetch( '%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token}))) if result.status_code != 200: try: error_description = json.loads(result.content)['error_description'] except (ValueError, KeyError): error_description = '' _logger.error('Token info endpoint returned status %s: %s', result.status_code, error_description) return token_info = json.loads(result.content) # Validate email. if 'email' not in token_info: _logger.warning('Oauth token doesn\'t include an email address.') return if token_info.get('email_verified') != 'true': _logger.warning('Oauth token email isn\'t verified.') return # Validate client ID. client_id = token_info.get('azp') if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and client_id not in allowed_client_ids): _logger.warning('Client ID is not allowed: %s', client_id) return # Verify at least one of the scopes matches. _, sufficient_scopes = _process_scopes(scopes) authorized_scopes = token_info.get('scope', '').split(' ') if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes): _logger.warning('Oauth token scopes don\'t match any acceptable scopes.') return os.environ[_ENV_AUTH_EMAIL] = token_info['email'] os.environ[_ENV_AUTH_DOMAIN] = '' _logger.debug('Local dev returning user from token.')
python
def _set_bearer_user_vars_local(token, allowed_client_ids, scopes): """Validate the oauth bearer token on the dev server. Since the functions in the oauth module return only example results in local development, this hits the tokeninfo endpoint and attempts to validate the token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we can get the user from the token. Args: token: String with the oauth token to validate. allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes. """ # Get token info from the tokeninfo endpoint. result = urlfetch.fetch( '%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token}))) if result.status_code != 200: try: error_description = json.loads(result.content)['error_description'] except (ValueError, KeyError): error_description = '' _logger.error('Token info endpoint returned status %s: %s', result.status_code, error_description) return token_info = json.loads(result.content) # Validate email. if 'email' not in token_info: _logger.warning('Oauth token doesn\'t include an email address.') return if token_info.get('email_verified') != 'true': _logger.warning('Oauth token email isn\'t verified.') return # Validate client ID. client_id = token_info.get('azp') if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and client_id not in allowed_client_ids): _logger.warning('Client ID is not allowed: %s', client_id) return # Verify at least one of the scopes matches. _, sufficient_scopes = _process_scopes(scopes) authorized_scopes = token_info.get('scope', '').split(' ') if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes): _logger.warning('Oauth token scopes don\'t match any acceptable scopes.') return os.environ[_ENV_AUTH_EMAIL] = token_info['email'] os.environ[_ENV_AUTH_DOMAIN] = '' _logger.debug('Local dev returning user from token.')
[ "def", "_set_bearer_user_vars_local", "(", "token", ",", "allowed_client_ids", ",", "scopes", ")", ":", "result", "=", "urlfetch", ".", "fetch", "(", "'%s?%s'", "%", "(", "_TOKENINFO_URL", ",", "urllib", ".", "urlencode", "(", "{", "'access_token'", ":", "toke...
Validate the oauth bearer token on the dev server. Since the functions in the oauth module return only example results in local development, this hits the tokeninfo endpoint and attempts to validate the token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we can get the user from the token. Args: token: String with the oauth token to validate. allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes.
[ "Validate", "the", "oauth", "bearer", "token", "on", "the", "dev", "server", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L413-L463
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_verify_parsed_token
def _verify_parsed_token(parsed_token, issuers, audiences, allowed_client_ids, is_legacy_google_auth=True): """Verify a parsed user ID token. Args: parsed_token: The parsed token information. issuers: A list of allowed issuers audiences: The allowed audiences. allowed_client_ids: The allowed client IDs. Returns: True if the token is verified, False otherwise. """ # Verify the issuer. if parsed_token.get('iss') not in issuers: _logger.warning('Issuer was not valid: %s', parsed_token.get('iss')) return False # Check audiences. aud = parsed_token.get('aud') if not aud: _logger.warning('No aud field in token') return False # Special legacy handling if aud == cid. This occurs with iOS and browsers. # As long as audience == client_id and cid is allowed, we need to accept # the audience for compatibility. cid = parsed_token.get('azp') audience_allowed = (aud in audiences) or (is_legacy_google_auth and aud == cid) if not audience_allowed: _logger.warning('Audience not allowed: %s', aud) return False # Check allowed client IDs, for legacy auth. if is_legacy_google_auth: if list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK: _logger.warning('Client ID check can\'t be skipped for ID tokens. ' 'Id_token cannot be verified.') return False elif not cid or cid not in allowed_client_ids: _logger.warning('Client ID is not allowed: %s', cid) return False if 'email' not in parsed_token: return False return True
python
def _verify_parsed_token(parsed_token, issuers, audiences, allowed_client_ids, is_legacy_google_auth=True): """Verify a parsed user ID token. Args: parsed_token: The parsed token information. issuers: A list of allowed issuers audiences: The allowed audiences. allowed_client_ids: The allowed client IDs. Returns: True if the token is verified, False otherwise. """ # Verify the issuer. if parsed_token.get('iss') not in issuers: _logger.warning('Issuer was not valid: %s', parsed_token.get('iss')) return False # Check audiences. aud = parsed_token.get('aud') if not aud: _logger.warning('No aud field in token') return False # Special legacy handling if aud == cid. This occurs with iOS and browsers. # As long as audience == client_id and cid is allowed, we need to accept # the audience for compatibility. cid = parsed_token.get('azp') audience_allowed = (aud in audiences) or (is_legacy_google_auth and aud == cid) if not audience_allowed: _logger.warning('Audience not allowed: %s', aud) return False # Check allowed client IDs, for legacy auth. if is_legacy_google_auth: if list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK: _logger.warning('Client ID check can\'t be skipped for ID tokens. ' 'Id_token cannot be verified.') return False elif not cid or cid not in allowed_client_ids: _logger.warning('Client ID is not allowed: %s', cid) return False if 'email' not in parsed_token: return False return True
[ "def", "_verify_parsed_token", "(", "parsed_token", ",", "issuers", ",", "audiences", ",", "allowed_client_ids", ",", "is_legacy_google_auth", "=", "True", ")", ":", "if", "parsed_token", ".", "get", "(", "'iss'", ")", "not", "in", "issuers", ":", "_logger", "...
Verify a parsed user ID token. Args: parsed_token: The parsed token information. issuers: A list of allowed issuers audiences: The allowed audiences. allowed_client_ids: The allowed client IDs. Returns: True if the token is verified, False otherwise.
[ "Verify", "a", "parsed", "user", "ID", "token", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L470-L514
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_get_cert_expiration_time
def _get_cert_expiration_time(headers): """Get the expiration time for a cert, given the response headers. Get expiration time from the headers in the result. If we can't get a time from the headers, this returns 0, indicating that the cert shouldn't be cached. Args: headers: A dict containing the response headers from the request to get certs. Returns: An integer with the number of seconds the cert should be cached. This value is guaranteed to be >= 0. """ # Check the max age of the cert. cache_control = headers.get('Cache-Control', '') # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 indicates only # a comma-separated header is valid, so it should be fine to split this on # commas. for entry in cache_control.split(','): match = _MAX_AGE_REGEX.match(entry) if match: cache_time_seconds = int(match.group(1)) break else: return 0 # Subtract the cert's age. age = headers.get('Age') if age is not None: try: age = int(age) except ValueError: age = 0 cache_time_seconds -= age return max(0, cache_time_seconds)
python
def _get_cert_expiration_time(headers): """Get the expiration time for a cert, given the response headers. Get expiration time from the headers in the result. If we can't get a time from the headers, this returns 0, indicating that the cert shouldn't be cached. Args: headers: A dict containing the response headers from the request to get certs. Returns: An integer with the number of seconds the cert should be cached. This value is guaranteed to be >= 0. """ # Check the max age of the cert. cache_control = headers.get('Cache-Control', '') # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 indicates only # a comma-separated header is valid, so it should be fine to split this on # commas. for entry in cache_control.split(','): match = _MAX_AGE_REGEX.match(entry) if match: cache_time_seconds = int(match.group(1)) break else: return 0 # Subtract the cert's age. age = headers.get('Age') if age is not None: try: age = int(age) except ValueError: age = 0 cache_time_seconds -= age return max(0, cache_time_seconds)
[ "def", "_get_cert_expiration_time", "(", "headers", ")", ":", "cache_control", "=", "headers", ".", "get", "(", "'Cache-Control'", ",", "''", ")", "for", "entry", "in", "cache_control", ".", "split", "(", "','", ")", ":", "match", "=", "_MAX_AGE_REGEX", ".",...
Get the expiration time for a cert, given the response headers. Get expiration time from the headers in the result. If we can't get a time from the headers, this returns 0, indicating that the cert shouldn't be cached. Args: headers: A dict containing the response headers from the request to get certs. Returns: An integer with the number of seconds the cert should be cached. This value is guaranteed to be >= 0.
[ "Get", "the", "expiration", "time", "for", "a", "cert", "given", "the", "response", "headers", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L524-L561
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_get_cached_certs
def _get_cached_certs(cert_uri, cache): """Get certs from cache if present; otherwise, gets from URI and caches them. Args: cert_uri: URI from which to retrieve certs if cache is stale or empty. cache: Cache of pre-fetched certs. Returns: The retrieved certs. """ certs = cache.get(cert_uri, namespace=_CERT_NAMESPACE) if certs is None: _logger.debug('Cert cache miss for %s', cert_uri) try: result = urlfetch.fetch(cert_uri) except AssertionError: # This happens in unit tests. Act as if we couldn't get any certs. return None if result.status_code == 200: certs = json.loads(result.content) expiration_time_seconds = _get_cert_expiration_time(result.headers) if expiration_time_seconds: cache.set(cert_uri, certs, time=expiration_time_seconds, namespace=_CERT_NAMESPACE) else: _logger.error( 'Certs not available, HTTP request returned %d', result.status_code) return certs
python
def _get_cached_certs(cert_uri, cache): """Get certs from cache if present; otherwise, gets from URI and caches them. Args: cert_uri: URI from which to retrieve certs if cache is stale or empty. cache: Cache of pre-fetched certs. Returns: The retrieved certs. """ certs = cache.get(cert_uri, namespace=_CERT_NAMESPACE) if certs is None: _logger.debug('Cert cache miss for %s', cert_uri) try: result = urlfetch.fetch(cert_uri) except AssertionError: # This happens in unit tests. Act as if we couldn't get any certs. return None if result.status_code == 200: certs = json.loads(result.content) expiration_time_seconds = _get_cert_expiration_time(result.headers) if expiration_time_seconds: cache.set(cert_uri, certs, time=expiration_time_seconds, namespace=_CERT_NAMESPACE) else: _logger.error( 'Certs not available, HTTP request returned %d', result.status_code) return certs
[ "def", "_get_cached_certs", "(", "cert_uri", ",", "cache", ")", ":", "certs", "=", "cache", ".", "get", "(", "cert_uri", ",", "namespace", "=", "_CERT_NAMESPACE", ")", "if", "certs", "is", "None", ":", "_logger", ".", "debug", "(", "'Cert cache miss for %s'"...
Get certs from cache if present; otherwise, gets from URI and caches them. Args: cert_uri: URI from which to retrieve certs if cache is stale or empty. cache: Cache of pre-fetched certs. Returns: The retrieved certs.
[ "Get", "certs", "from", "cache", "if", "present", ";", "otherwise", "gets", "from", "URI", "and", "caches", "them", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L564-L593
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
_verify_signed_jwt_with_certs
def _verify_signed_jwt_with_certs( jwt, time_now, cache, cert_uri=_DEFAULT_CERT_URI): """Verify a JWT against public certs. See http://self-issued.info/docs/draft-jones-json-web-token.html. The PyCrypto library included with Google App Engine is severely limited and so you have to use it very carefully to verify JWT signatures. The first issue is that the library can't read X.509 files, so we make a call to a special URI that has the public cert in modulus/exponent form in JSON. The second issue is that the RSA.verify method doesn't work, at least for how the JWT tokens are signed, so we have to manually verify the signature of the JWT, which means hashing the signed part of the JWT and comparing that to the signature that's been encrypted with the public key. Args: jwt: string, A JWT. time_now: The current time, as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). cert_uri: string, URI to get cert modulus and exponent in JSON format. Returns: dict, The deserialized JSON payload in the JWT. Raises: _AppIdentityError: if any checks are failed. """ segments = jwt.split('.') if len(segments) != 3: # Note that anywhere we print the jwt or its json body, we need to use # %r instead of %s, so that non-printable characters are escaped safely. raise _AppIdentityError('Token is not an id_token (Wrong number of ' 'segments)') signed = '%s.%s' % (segments[0], segments[1]) signature = _urlsafe_b64decode(segments[2]) # pycrypto only deals in integers, so we have to convert the string of bytes # into a long. lsignature = long(signature.encode('hex'), 16) # Verify expected header. header_body = _urlsafe_b64decode(segments[0]) try: header = json.loads(header_body) except: raise _AppIdentityError("Can't parse header") if header.get('alg') != 'RS256': raise _AppIdentityError('Unexpected encryption algorithm: %r' % header.get('alg')) # Formerly we would parse the token body here. # However, it's not safe to do that without first checking the signature. 
certs = _get_cached_certs(cert_uri, cache) if certs is None: raise _AppIdentityError( 'Unable to retrieve certs needed to verify the signed JWT') # Verify that we were able to load the Crypto libraries, before we try # to use them. if not _CRYPTO_LOADED: raise _AppIdentityError('Unable to load pycrypto library. Can\'t verify ' 'id_token signature. See http://www.pycrypto.org ' 'for more information on pycrypto.') # SHA256 hash of the already 'signed' segment from the JWT. Since a SHA256 # hash, will always have length 64. local_hash = SHA256.new(signed).hexdigest() # Check signature. verified = False for keyvalue in certs['keyvalues']: try: modulus = _b64_to_long(keyvalue['modulus']) exponent = _b64_to_long(keyvalue['exponent']) key = RSA.construct((modulus, exponent)) # Encrypt, and convert to a hex string. hexsig = '%064x' % key.encrypt(lsignature, '')[0] # Make sure we have only last 64 base64 chars hexsig = hexsig[-64:] # Check the signature on 'signed' by encrypting 'signature' with the # public key and confirming the result matches the SHA256 hash of # 'signed'. hmac.compare_digest(a, b) is used to avoid timing attacks. verified = hmac.compare_digest(hexsig, local_hash) if verified: break except Exception, e: # pylint: disable=broad-except # Log the exception for debugging purpose. _logger.debug( 'Signature verification error: %s; continuing with the next cert.', e) continue if not verified: raise _AppIdentityError('Invalid token signature') # Parse token. json_body = _urlsafe_b64decode(segments[1]) try: parsed = json.loads(json_body) except: raise _AppIdentityError("Can't parse token body") # Check creation timestamp. iat = parsed.get('iat') if iat is None: raise _AppIdentityError('No iat field in token') earliest = iat - _CLOCK_SKEW_SECS # Check expiration timestamp. 
exp = parsed.get('exp') if exp is None: raise _AppIdentityError('No exp field in token') if exp >= time_now + _MAX_TOKEN_LIFETIME_SECS: raise _AppIdentityError('exp field too far in future') latest = exp + _CLOCK_SKEW_SECS if time_now < earliest: raise _AppIdentityError('Token used too early, %d < %d' % (time_now, earliest)) if time_now > latest: raise _AppIdentityError('Token used too late, %d > %d' % (time_now, latest)) return parsed
python
def _verify_signed_jwt_with_certs( jwt, time_now, cache, cert_uri=_DEFAULT_CERT_URI): """Verify a JWT against public certs. See http://self-issued.info/docs/draft-jones-json-web-token.html. The PyCrypto library included with Google App Engine is severely limited and so you have to use it very carefully to verify JWT signatures. The first issue is that the library can't read X.509 files, so we make a call to a special URI that has the public cert in modulus/exponent form in JSON. The second issue is that the RSA.verify method doesn't work, at least for how the JWT tokens are signed, so we have to manually verify the signature of the JWT, which means hashing the signed part of the JWT and comparing that to the signature that's been encrypted with the public key. Args: jwt: string, A JWT. time_now: The current time, as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). cert_uri: string, URI to get cert modulus and exponent in JSON format. Returns: dict, The deserialized JSON payload in the JWT. Raises: _AppIdentityError: if any checks are failed. """ segments = jwt.split('.') if len(segments) != 3: # Note that anywhere we print the jwt or its json body, we need to use # %r instead of %s, so that non-printable characters are escaped safely. raise _AppIdentityError('Token is not an id_token (Wrong number of ' 'segments)') signed = '%s.%s' % (segments[0], segments[1]) signature = _urlsafe_b64decode(segments[2]) # pycrypto only deals in integers, so we have to convert the string of bytes # into a long. lsignature = long(signature.encode('hex'), 16) # Verify expected header. header_body = _urlsafe_b64decode(segments[0]) try: header = json.loads(header_body) except: raise _AppIdentityError("Can't parse header") if header.get('alg') != 'RS256': raise _AppIdentityError('Unexpected encryption algorithm: %r' % header.get('alg')) # Formerly we would parse the token body here. # However, it's not safe to do that without first checking the signature. 
certs = _get_cached_certs(cert_uri, cache) if certs is None: raise _AppIdentityError( 'Unable to retrieve certs needed to verify the signed JWT') # Verify that we were able to load the Crypto libraries, before we try # to use them. if not _CRYPTO_LOADED: raise _AppIdentityError('Unable to load pycrypto library. Can\'t verify ' 'id_token signature. See http://www.pycrypto.org ' 'for more information on pycrypto.') # SHA256 hash of the already 'signed' segment from the JWT. Since a SHA256 # hash, will always have length 64. local_hash = SHA256.new(signed).hexdigest() # Check signature. verified = False for keyvalue in certs['keyvalues']: try: modulus = _b64_to_long(keyvalue['modulus']) exponent = _b64_to_long(keyvalue['exponent']) key = RSA.construct((modulus, exponent)) # Encrypt, and convert to a hex string. hexsig = '%064x' % key.encrypt(lsignature, '')[0] # Make sure we have only last 64 base64 chars hexsig = hexsig[-64:] # Check the signature on 'signed' by encrypting 'signature' with the # public key and confirming the result matches the SHA256 hash of # 'signed'. hmac.compare_digest(a, b) is used to avoid timing attacks. verified = hmac.compare_digest(hexsig, local_hash) if verified: break except Exception, e: # pylint: disable=broad-except # Log the exception for debugging purpose. _logger.debug( 'Signature verification error: %s; continuing with the next cert.', e) continue if not verified: raise _AppIdentityError('Invalid token signature') # Parse token. json_body = _urlsafe_b64decode(segments[1]) try: parsed = json.loads(json_body) except: raise _AppIdentityError("Can't parse token body") # Check creation timestamp. iat = parsed.get('iat') if iat is None: raise _AppIdentityError('No iat field in token') earliest = iat - _CLOCK_SKEW_SECS # Check expiration timestamp. 
exp = parsed.get('exp') if exp is None: raise _AppIdentityError('No exp field in token') if exp >= time_now + _MAX_TOKEN_LIFETIME_SECS: raise _AppIdentityError('exp field too far in future') latest = exp + _CLOCK_SKEW_SECS if time_now < earliest: raise _AppIdentityError('Token used too early, %d < %d' % (time_now, earliest)) if time_now > latest: raise _AppIdentityError('Token used too late, %d > %d' % (time_now, latest)) return parsed
[ "def", "_verify_signed_jwt_with_certs", "(", "jwt", ",", "time_now", ",", "cache", ",", "cert_uri", "=", "_DEFAULT_CERT_URI", ")", ":", "segments", "=", "jwt", ".", "split", "(", "'.'", ")", "if", "len", "(", "segments", ")", "!=", "3", ":", "raise", "_A...
Verify a JWT against public certs. See http://self-issued.info/docs/draft-jones-json-web-token.html. The PyCrypto library included with Google App Engine is severely limited and so you have to use it very carefully to verify JWT signatures. The first issue is that the library can't read X.509 files, so we make a call to a special URI that has the public cert in modulus/exponent form in JSON. The second issue is that the RSA.verify method doesn't work, at least for how the JWT tokens are signed, so we have to manually verify the signature of the JWT, which means hashing the signed part of the JWT and comparing that to the signature that's been encrypted with the public key. Args: jwt: string, A JWT. time_now: The current time, as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). cert_uri: string, URI to get cert modulus and exponent in JSON format. Returns: dict, The deserialized JSON payload in the JWT. Raises: _AppIdentityError: if any checks are failed.
[ "Verify", "a", "JWT", "against", "public", "certs", "." ]
00dd7c7a52a9ee39d5923191c2604b8eafdb3f24
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L603-L732
train