Dataset columns (type and value range):

partition           stringclasses   3 values
func_name           stringlengths   1 - 134
docstring           stringlengths   1 - 46.9k
path                stringlengths   4 - 223
original_string     stringlengths   75 - 104k
code                stringlengths   75 - 104k
docstring_tokens    listlengths     1 - 1.97k
repo                stringlengths   7 - 55
language            stringclasses   1 value
url                 stringlengths   87 - 315
code_tokens         listlengths     19 - 28.4k
sha                 stringlengths   40 - 40
train
get_root_resource
Returns the root resource.
nefertari/resource.py
def get_root_resource(config):
    """Returns the root resource."""
    app_package_name = get_app_package_name(config)
    return config.registry._root_resources.setdefault(
        app_package_name, Resource(config))
[ "Returns", "the", "root", "resource", "." ]
ramses-tech/nefertari
python
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L46-L50
[ "def", "get_root_resource", "(", "config", ")", ":", "app_package_name", "=", "get_app_package_name", "(", "config", ")", "return", "config", ".", "registry", ".", "_root_resources", ".", "setdefault", "(", "app_package_name", ",", "Resource", "(", "config", ")", ...
c7caffe11576c11aa111adbdbadeff70ce66b1dd
train
add_resource_routes
``view`` is a dotted name of (or direct reference to) a Python view class, e.g. ``'my.package.views.MyView'``.

``member_name`` should be the appropriate singular version of the resource given your locale and used with members of the collection.

``collection_name`` will be used to refer to the resource collection methods and should be a plural version of the member_name argument.

All keyword arguments are optional.

``path_prefix``
    Prepends the URL path for the Route with the path_prefix given. This is most useful for cases where you want to mix resources or relations between resources.

``name_prefix``
    Prepends the route names that are generated with the name_prefix given. Combined with the path_prefix option, it's easy to generate route names and paths that represent resources that are in relations.

Example::

    config.add_resource_routes(
        'myproject.views:CategoryView',
        'message',
        'messages',
        path_prefix='/category/{category_id}',
        name_prefix="category_")

    # GET /category/7/messages/1
    # has named route "category_message"
nefertari/resource.py
def add_resource_routes(config, view, member_name, collection_name, **kwargs):
    """
    ``view`` is a dotted name of (or direct reference to) a Python view
    class, e.g. ``'my.package.views.MyView'``.

    ``member_name`` should be the appropriate singular version of the
    resource given your locale and used with members of the collection.

    ``collection_name`` will be used to refer to the resource collection
    methods and should be a plural version of the member_name argument.

    All keyword arguments are optional.

    ``path_prefix``
        Prepends the URL path for the Route with the path_prefix given.
        This is most useful for cases where you want to mix resources or
        relations between resources.

    ``name_prefix``
        Prepends the route names that are generated with the name_prefix
        given. Combined with the path_prefix option, it's easy to generate
        route names and paths that represent resources that are in relations.

    Example::

        config.add_resource_routes(
            'myproject.views:CategoryView',
            'message',
            'messages',
            path_prefix='/category/{category_id}',
            name_prefix="category_")

        # GET /category/7/messages/1
        # has named route "category_message"
    """
    view = maybe_dotted(view)
    path_prefix = kwargs.pop('path_prefix', '')
    name_prefix = kwargs.pop('name_prefix', '')

    if config.route_prefix:
        name_prefix = "%s_%s" % (config.route_prefix, name_prefix)

    if collection_name:
        id_name = '/{%s}' % (kwargs.pop('id_name', None) or DEFAULT_ID_NAME)
    else:
        id_name = ''
    path = path_prefix.strip('/') + '/' + (collection_name or member_name)

    _factory = kwargs.pop('factory', None)
    # If factory is not set, then auth should be False
    _auth = kwargs.pop('auth', None) and _factory
    _traverse = (kwargs.pop('traverse', None) or id_name) if _factory else None

    action_route = {}
    added_routes = {}

    def add_route_and_view(config, action, route_name, path, request_method,
                           **route_kwargs):
        if route_name not in added_routes:
            config.add_route(
                route_name, path, factory=_factory,
                request_method=['GET', 'POST', 'PUT', 'PATCH',
                                'DELETE', 'OPTIONS'],
                **route_kwargs)
            added_routes[route_name] = path
        action_route[action] = route_name

        if _auth:
            permission = PERMISSIONS[action]
        else:
            permission = None

        config.add_view(view=view, attr=action, route_name=route_name,
                        request_method=request_method,
                        permission=permission,
                        **kwargs)
        config.commit()

    if collection_name == member_name:
        collection_name = collection_name + '_collection'

    if collection_name:
        add_route_and_view(
            config, 'index', name_prefix + collection_name, path, 'GET')
        add_route_and_view(
            config, 'collection_options', name_prefix + collection_name,
            path, 'OPTIONS')

    add_route_and_view(
        config, 'show', name_prefix + member_name, path + id_name, 'GET',
        traverse=_traverse)
    add_route_and_view(
        config, 'item_options', name_prefix + member_name, path + id_name,
        'OPTIONS', traverse=_traverse)
    add_route_and_view(
        config, 'replace', name_prefix + member_name, path + id_name, 'PUT',
        traverse=_traverse)
    add_route_and_view(
        config, 'update', name_prefix + member_name, path + id_name, 'PATCH',
        traverse=_traverse)
    add_route_and_view(
        config, 'create', name_prefix + (collection_name or member_name),
        path, 'POST')
    add_route_and_view(
        config, 'delete', name_prefix + member_name, path + id_name, 'DELETE',
        traverse=_traverse)

    if collection_name:
        add_route_and_view(
            config, 'update_many',
            name_prefix + (collection_name or member_name), path, 'PUT',
            traverse=_traverse)
        add_route_and_view(
            config, 'update_many',
            name_prefix + (collection_name or member_name), path, 'PATCH',
            traverse=_traverse)
        add_route_and_view(
            config, 'delete_many',
            name_prefix + (collection_name or member_name), path, 'DELETE',
            traverse=_traverse)

    return action_route
[ "view", "is", "a", "dotted", "name", "of", "(", "or", "direct", "reference", "to", ")", "a", "Python", "view", "class", "e", ".", "g", ".", "my", ".", "package", ".", "views", ".", "MyView", "." ]
ramses-tech/nefertari
python
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L57-L190
[ "def", "add_resource_routes", "(", "config", ",", "view", ",", "member_name", ",", "collection_name", ",", "*", "*", "kwargs", ")", ":", "view", "=", "maybe_dotted", "(", "view", ")", "path_prefix", "=", "kwargs", ".", "pop", "(", "'path_prefix'", ",", "''...
c7caffe11576c11aa111adbdbadeff70ce66b1dd
train
get_default_view_path
Returns the dotted path to the default view class.
nefertari/resource.py
def get_default_view_path(resource):
    "Returns the dotted path to the default view class."
    parts = [a.member_name for a in resource.ancestors] + \
        [resource.collection_name or resource.member_name]

    if resource.prefix:
        parts.insert(-1, resource.prefix)

    view_file = '%s' % '_'.join(parts)
    view = '%s:%sView' % (view_file, snake2camel(view_file))
    app_package_name = get_app_package_name(resource.config)
    return '%s.views.%s' % (app_package_name, view)
[ "Returns", "the", "dotted", "path", "to", "the", "default", "view", "class", "." ]
ramses-tech/nefertari
python
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L193-L206
[ "def", "get_default_view_path", "(", "resource", ")", ":", "parts", "=", "[", "a", ".", "member_name", "for", "a", "in", "resource", ".", "ancestors", "]", "+", "[", "resource", ".", "collection_name", "or", "resource", ".", "member_name", "]", "if", "reso...
c7caffe11576c11aa111adbdbadeff70ce66b1dd
train
Resource.get_ancestors
Returns the list of ancestor resources.
nefertari/resource.py
def get_ancestors(self):
    "Returns the list of ancestor resources."
    if self._ancestors:
        return self._ancestors

    if not self.parent:
        return []

    obj = self.resource_map.get(self.parent.uid)

    while obj and obj.member_name:
        self._ancestors.append(obj)
        obj = obj.parent

    self._ancestors.reverse()
    return self._ancestors
[ "Returns", "the", "list", "of", "ancestor", "resources", "." ]
ramses-tech/nefertari
python
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L231-L247
[ "def", "get_ancestors", "(", "self", ")", ":", "if", "self", ".", "_ancestors", ":", "return", "self", ".", "_ancestors", "if", "not", "self", ".", "parent", ":", "return", "[", "]", "obj", "=", "self", ".", "resource_map", ".", "get", "(", "self", "...
c7caffe11576c11aa111adbdbadeff70ce66b1dd
train
Resource.add
:param member_name: singular name of the resource. It should be the appropriate singular version of the resource given your locale and used with members of the collection.
:param collection_name: plural name of the resource. It will be used to refer to the resource collection methods and should be a plural version of the ``member_name`` argument. Note: if collection_name is empty, it means the resource is singular.
:param parent: parent resource name or object.
:param uid: unique name for the resource.
:param kwargs:
    view: custom view to overwrite the default one.
    The rest of the keyword arguments are passed to the add_resource_routes call.
:return: the newly created Resource object
nefertari/resource.py
def add(self, member_name, collection_name='', parent=None, uid='',
        **kwargs):
    """
    :param member_name: singular name of the resource. It should be the
        appropriate singular version of the resource given your locale and
        used with members of the collection.
    :param collection_name: plural name of the resource. It will be used to
        refer to the resource collection methods and should be a plural
        version of the ``member_name`` argument.
        Note: if collection_name is empty, it means the resource is singular.
    :param parent: parent resource name or object.
    :param uid: unique name for the resource.
    :param kwargs:
        view: custom view to overwrite the default one.
        The rest of the keyword arguments are passed to the
        add_resource_routes call.
    :return: the newly created Resource object
    """
    # self is the parent resource on which this method is called.
    parent = (self.resource_map.get(parent)
              if type(parent) is str else parent or self)
    prefix = kwargs.pop('prefix', '')
    uid = (uid or
           ':'.join(filter(bool, [parent.uid, prefix, member_name])))

    if uid in self.resource_map:
        raise ValueError('%s already exists in resource map' % uid)

    # Use id_name of parent for singular views to make url generation easier
    id_name = kwargs.get('id_name', '')
    if not id_name and parent:
        id_name = parent.id_name

    new_resource = Resource(
        self.config, member_name=member_name,
        collection_name=collection_name, parent=parent, uid=uid,
        id_name=id_name, prefix=prefix)

    view = maybe_dotted(
        kwargs.pop('view', None) or get_default_view_path(new_resource))

    for name, val in kwargs.pop('view_args', {}).items():
        setattr(view, name, val)

    root_resource = self.config.get_root_resource()
    view.root_resource = root_resource
    new_resource.view = view

    path_segs = []
    kwargs['path_prefix'] = ''

    for res in new_resource.ancestors:
        if not res.is_singular:
            if res.id_name:
                id_full = res.id_name
            else:
                id_full = "%s_%s" % (res.member_name, DEFAULT_ID_NAME)
            path_segs.append('%s/{%s}' % (res.collection_name, id_full))
        else:
            path_segs.append(res.member_name)

    if path_segs:
        kwargs['path_prefix'] = '/'.join(path_segs)

    if prefix:
        kwargs['path_prefix'] += '/' + prefix

    name_segs = [a.member_name for a in new_resource.ancestors]
    name_segs.insert(1, prefix)
    name_segs = [seg for seg in name_segs if seg]
    if name_segs:
        kwargs['name_prefix'] = '_'.join(name_segs) + ':'

    new_resource.renderer = kwargs.setdefault(
        'renderer', view._default_renderer)

    kwargs.setdefault('auth', root_resource.auth)
    kwargs.setdefault('factory', root_resource.default_factory)
    _factory = maybe_dotted(kwargs['factory'])

    kwargs['auth'] = kwargs.get('auth', root_resource.auth)
    kwargs['http_cache'] = kwargs.get(
        'http_cache', root_resource.http_cache)

    new_resource.action_route_map = add_resource_routes(
        self.config, view, member_name, collection_name, **kwargs)

    self.resource_map[uid] = new_resource
    # add all route names for this resource as keys in the dict,
    # so it's easy to find it in the view.
    self.resource_map.update(dict.fromkeys(
        list(new_resource.action_route_map.values()),
        new_resource))

    # Store resources in a {modelName: resource} map if:
    #   * its view has Model defined
    #   * it's not singular
    #   * its parent is root or it's not already stored
    model = new_resource.view.Model
    is_collection = model is not None and not new_resource.is_singular
    if is_collection:
        is_needed = (model.__name__ not in self.model_collections or
                     new_resource.parent is root_resource)
        if is_needed:
            self.model_collections[model.__name__] = new_resource

    parent.children.append(new_resource)
    view._resource = new_resource
    view._factory = _factory
    return new_resource
[ ":", "param", "member_name", ":", "singular", "name", "of", "the", "resource", ".", "It", "should", "be", "the", "appropriate", "singular", "version", "of", "the", "resource", "given", "your", "locale", "and", "used", "with", "members", "of", "the", "collect...
ramses-tech/nefertari
python
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L257-L379
[ "def", "add", "(", "self", ",", "member_name", ",", "collection_name", "=", "''", ",", "parent", "=", "None", ",", "uid", "=", "''", ",", "*", "*", "kwargs", ")", ":", "# self is the parent resource on which this method is called.", "parent", "=", "(", "self",...
c7caffe11576c11aa111adbdbadeff70ce66b1dd
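A minimal usage sketch of the resource API above (illustrative only: it assumes a Pyramid/nefertari setup where get_root_resource() is available as a config directive, and the 'user'/'story' resource names are made up):

# hypothetical Pyramid includeme; resource and route names are illustrative
def includeme(config):
    root = config.get_root_resource()
    user = root.add('user', 'users')      # /users and /users/{user_id}
    user.add('story', 'stories')          # nested: /users/{user_id}/stories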
train
Resource.add_from_child
Add a resource and all of its child resources to the current resource.
nefertari/resource.py
def add_from_child(self, resource, **kwargs):
    """
    Add a resource and all of its child resources to the current resource.
    """
    new_resource = self.add(
        resource.member_name, resource.collection_name, **kwargs)
    for child in resource.children:
        new_resource.add_from_child(child, **kwargs)
[ "Add", "a", "resource", "with", "its", "all", "children", "resources", "to", "the", "current", "resource", "." ]
ramses-tech/nefertari
python
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L381-L389
[ "def", "add_from_child", "(", "self", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "new_resource", "=", "self", ".", "add", "(", "resource", ".", "member_name", ",", "resource", ".", "collection_name", ",", "*", "*", "kwargs", ")", "for", "child",...
c7caffe11576c11aa111adbdbadeff70ce66b1dd
train
DatasetContainer.add
Add the path of a data set to the list of available sets

NOTE: a data set is assumed to be a pickled and gzip compressed Pandas DataFrame

Parameters
----------
path : str
opengrid/datasets/datasets.py
def add(self, path):
    """
    Add the path of a data set to the list of available sets

    NOTE: a data set is assumed to be a pickled and gzip compressed Pandas DataFrame

    Parameters
    ----------
    path : str
    """
    name_with_ext = os.path.split(path)[1]  # split directory and filename
    name = name_with_ext.split('.')[0]      # remove extension
    self.list.update({name: path})
[ "Add", "the", "path", "of", "a", "data", "set", "to", "the", "list", "of", "available", "sets" ]
opengridcc/opengrid
python
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/datasets/datasets.py#L27-L40
[ "def", "add", "(", "self", ",", "path", ")", ":", "name_with_ext", "=", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", "# split directory and filename", "name", "=", "name_with_ext", ".", "split", "(", "'.'", ")", "[", "0", "]", "#...
69b8da3c8fcea9300226c45ef0628cd6d4307651
train
DatasetContainer.unpack
Unpacks a data set to a Pandas DataFrame

Parameters
----------
name : str
    call `.list` to see all available datasets

Returns
-------
pd.DataFrame
opengrid/datasets/datasets.py
def unpack(self, name):
    """
    Unpacks a data set to a Pandas DataFrame

    Parameters
    ----------
    name : str
        call `.list` to see all available datasets

    Returns
    -------
    pd.DataFrame
    """
    path = self.list[name]
    df = pd.read_pickle(path, compression='gzip')
    return df
[ "Unpacks", "a", "data", "set", "to", "a", "Pandas", "DataFrame" ]
opengridcc/opengrid
python
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/datasets/datasets.py#L42-L57
[ "def", "unpack", "(", "self", ",", "name", ")", ":", "path", "=", "self", ".", "list", "[", "name", "]", "df", "=", "pd", ".", "read_pickle", "(", "path", ",", "compression", "=", "'gzip'", ")", "return", "df" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
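A short usage sketch tying DatasetContainer.add and unpack together (illustrative: the container variable and the file path are made up, and the file is assumed to be a gzip-pickled DataFrame as the docstring above notes):

# hypothetical container instance and path
datasets.add('/data/elec_2016_hour.pkl')      # registered under the name 'elec_2016_hour'
df = datasets.unpack('elec_2016_hour')        # read back as a pandas DataFrame
print(df.head())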
train
six_frame
translate each sequence into six reading frames
ctbBio/sixframe.py
def six_frame(genome, table, minimum=10):
    """
    translate each sequence into six reading frames
    """
    for seq in parse_fasta(genome):
        dna = Seq(seq[1].upper().replace('U', 'T'), IUPAC.ambiguous_dna)
        counter = 0
        for sequence in ['f', dna], ['rc', dna.reverse_complement()]:
            direction, sequence = sequence
            for frame in range(0, 3):
                for prot in \
                        sequence[frame:].\
                        translate(table=table, to_stop=False).split('*'):
                    if len(prot) < minimum:
                        continue
                    counter += 1
                    header = '%s_%s table=%s frame=%s-%s %s' % \
                        (seq[0].split()[0], counter, table, frame + 1,
                         direction, ' '.join(seq[0].split()[1:]))
                    yield [header, prot]
[ "translate", "each", "sequence", "into", "six", "reading", "frames" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sixframe.py#L13-L32
[ "def", "six_frame", "(", "genome", ",", "table", ",", "minimum", "=", "10", ")", ":", "for", "seq", "in", "parse_fasta", "(", "genome", ")", ":", "dna", "=", "Seq", "(", "seq", "[", "1", "]", ".", "upper", "(", ")", ".", "replace", "(", "'U'", ...
83b2566b3a5745437ec651cd6cafddd056846240
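A small driver sketch for six_frame (illustrative: the FASTA path is made up, and Biopython plus the module's parse_fasta helper are assumed to be available):

# hypothetical driver: print all translations of length >= 10 using NCBI table 11
for header, protein in six_frame('genome.fna', table=11, minimum=10):
    print('>' + header)
    print(protein)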
train
publish_processed_network_packets
# Redis/RabbitMQ/SQS messaging endpoints for pub-sub
routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts")
queue_name = ev("PUBLISH_QUEUE", "reporting.accounts")
auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15")
serializer = "json"
network_pipeline/scripts/network_agent.py
def publish_processed_network_packets(
        name="not-set",
        task_queue=None,
        result_queue=None,
        need_response=False,
        shutdown_msg="SHUTDOWN"):
    """
    # Redis/RabbitMQ/SQS messaging endpoints for pub-sub
    routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts")
    queue_name = ev("PUBLISH_QUEUE", "reporting.accounts")
    auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15")
    serializer = "json"
    """

    # these keys need to be cycled to prevent exploiting static keys
    filter_key = ev("IGNORE_KEY", INCLUDED_IGNORE_KEY)

    forward_host = ev("FORWARD_HOST", "127.0.0.1")
    forward_port = int(ev("FORWARD_PORT", "80"))
    include_filter_key = ev("FILTER_KEY", "")
    if not include_filter_key and filter_key:
        include_filter_key = filter_key
    filter_keys = [filter_key]

    log.info(("START consumer={} forward={}:{} with key={} filters={}")
             .format(name, forward_host, forward_port,
                     include_filter_key, filter_key))

    forward_skt = None
    not_done = True
    while not_done:

        if not forward_skt:
            forward_skt = connect_forwarder(
                forward_host=forward_host,
                forward_port=forward_port)

        next_task = task_queue.get()
        if next_task:
            if str(next_task) == shutdown_msg:
                # Poison pill for shutting down
                log.info(("{}: DONE CALLBACK Exiting msg={}")
                         .format(name, next_task))
                task_queue.task_done()
                break
            # end of handling shutdown case

            try:
                log.debug(("{} parsing").format(name))
                source = next_task.source
                packet = next_task.payload

                if not packet:
                    log.error(("{} invalid task found {} missing payload")
                              .format(name, next_task))
                    break

                log.debug(("{} found msg from src={}").format(name, source))

                network_data = parse_network_data(
                    data_packet=packet,
                    include_filter_key=include_filter_key,
                    filter_keys=filter_keys)

                if network_data["status"] == VALID:
                    if network_data["data_type"] == TCP \
                            or network_data["data_type"] == UDP \
                            or network_data["data_type"] == ARP \
                            or network_data["data_type"] == ICMP:

                        log.info(("{} valid={} packet={} data={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["target_data"]))

                        if not forward_skt:
                            forward_skt = connect_forwarder(
                                forward_host=forward_host,
                                forward_port=forward_port)

                        if forward_skt:
                            if network_data["stream"]:
                                sent = False
                                while not sent:
                                    try:
                                        log.info("sending={}".format(
                                            network_data["stream"]))
                                        send_msg(
                                            forward_skt,
                                            network_data["stream"]
                                            .encode("utf-8"))
                                        sent = True
                                    except Exception as e:
                                        sent = False
                                        time.sleep(0.5)
                                        try:
                                            forward_skt.close()
                                            forward_skt = None
                                        except Exception as w:
                                            forward_skt = None
                                        forward_skt = connect_forwarder(
                                            forward_host=forward_host,
                                            forward_port=forward_port)
                                # end of reconnecting

                                log.info("sent={}".format(
                                    network_data["stream"]))

                                if need_response:
                                    log.info("receiving")
                                    cdr_res = forward_skt.recv(1024)
                                    log.info(("cdr - res{}")
                                             .format(cdr_res))
                            else:
                                log.info(("{} EMPTY stream={} "
                                          "error={} status={}")
                                         .format(name,
                                                 network_data["stream"],
                                                 network_data["err"],
                                                 network_data["status"]))
                    else:
                        log.info(("{} not_supported valid={} "
                                  "packet data_type={} status={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["status"]))
                elif network_data["status"] == FILTERED:
                    log.info(("{} filtered={} status={}")
                             .format(name,
                                     network_data["filtered"],
                                     network_data["status"]))
                else:
                    if network_data["status"] == INVALID:
                        log.info(("{} invalid={} packet={} "
                                  "error={} status={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["error"],
                                         network_data["status"]))
                    else:
                        log.info(("{} unknown={} packet={} "
                                  "error={} status={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["error"],
                                         network_data["status"]))
                # end of if valid or not data
            except KeyboardInterrupt as k:
                log.info(("{} stopping").format(name))
                break
            except Exception as e:
                log.error(("{} failed packaging packet to forward "
                           "with ex={}").format(name, e))
                break
            # end of try/ex during payload processing
        # end of if found a next_task

        log.info(("Consumer: {} {}").format(name, next_task))
        task_queue.task_done()

        if need_response:
            answer = "processed: {}".format(next_task())
            result_queue.put(answer)
    # end of while

    if forward_skt:
        try:
            forward_skt.close()
            log.info("CLOSED connection")
            forward_skt = None
        except Exception:
            log.info("CLOSED connection")
    # end of cleaning up forwarding socket

    log.info("{} Done".format(name))
    return
[ "#", "Redis", "/", "RabbitMQ", "/", "SQS", "messaging", "endpoints", "for", "pub", "-", "sub", "routing_key", "=", "ev", "(", "PUBLISH_EXCHANGE", "reporting", ".", "accounts", ")", "queue_name", "=", "ev", "(", "PUBLISH_QUEUE", "reporting", ".", "accounts", ...
jay-johnson/network-pipeline
python
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/scripts/network_agent.py#L35-L244
[ "def", "publish_processed_network_packets", "(", "name", "=", "\"not-set\"", ",", "task_queue", "=", "None", ",", "result_queue", "=", "None", ",", "need_response", "=", "False", ",", "shutdown_msg", "=", "\"SHUTDOWN\"", ")", ":", "# these keys need to be cycled to pr...
4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa
train
run_main
run_main

start the packet consumers and the packet processors

:param need_response: should send response back to publisher
:param callback: handler method
network_pipeline/scripts/network_agent.py
def run_main(
        need_response=False,
        callback=None):
    """run_main

    start the packet consumers and the packet processors

    :param need_response: should send response back to publisher
    :param callback: handler method
    """

    stop_file = ev("STOP_FILE", "/opt/stop_recording")
    num_workers = int(ev("NUM_WORKERS", "1"))
    shutdown_msg = "SHUTDOWN"

    log.info("Start - {}".format(name))

    log.info("Creating multiprocessing queue")
    tasks = multiprocessing.JoinableQueue()
    queue_to_consume = multiprocessing.Queue()
    host = "localhost"

    # Start consumers
    log.info("Starting Consumers to process queued tasks")
    consumers = start_consumers_for_queue(
        num_workers=num_workers,
        tasks=tasks,
        queue_to_consume=queue_to_consume,
        shutdown_msg=shutdown_msg,
        consumer_class=WorkerToProcessPackets,
        callback=callback)

    log.info("creating socket")
    skt = create_layer_2_socket()
    log.info("socket created")

    not_done = True
    while not_done:

        if not skt:
            log.info("Failed to create layer 2 socket")
            log.info("Please make sure to run as root")
            not_done = False
            break

        try:
            if os.path.exists(stop_file):
                log.info(("Detected stop_file={}").format(stop_file))
                not_done = False
                break
            # stop if the file exists

            # Only works on linux
            packet = skt.recvfrom(65565)

            if os.path.exists(stop_file):
                log.info(("Detected stop_file={}").format(stop_file))
                not_done = False
                break
            # stop if the file was created during a wait loop

            tasks.put(NetworkPacketTask(source=host, payload=packet))
        except KeyboardInterrupt as k:
            log.info("Stopping")
            not_done = False
            break
        except Exception as e:
            log.error(("Failed reading socket with ex={}").format(e))
            not_done = False
            break
        # end of try/ex during socket receiving
    # end of while processing network packets

    log.info(("Shutting down consumers={}").format(len(consumers)))
    shutdown_consumers(num_workers=num_workers, tasks=tasks)

    # Wait for all of the tasks to finish
    if need_response:
        log.info("Waiting for tasks to finish")
        tasks.join()
        log.info("Done waiting for tasks to finish")
[ "run_main" ]
jay-johnson/network-pipeline
python
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/scripts/network_agent.py#L248-L341
[ "def", "run_main", "(", "need_response", "=", "False", ",", "callback", "=", "None", ")", ":", "stop_file", "=", "ev", "(", "\"STOP_FILE\"", ",", "\"/opt/stop_recording\"", ")", "num_workers", "=", "int", "(", "ev", "(", "\"NUM_WORKERS\"", ",", "\"1\"", ")",...
4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa
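A hypothetical entry point for the agent above (illustrative only: the callback signature is an assumption, and the process needs the privileges required to open a layer-2 socket):

def my_callback(*args, **kwargs):
    # made-up handler: just report whatever the worker hands over
    print("processed packet task: args={} kwargs={}".format(args, kwargs))

if __name__ == "__main__":
    run_main(need_response=False, callback=my_callback)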
train
best_model
determine the best model: archaea, bacteria, eukarya (best score)
ctbBio/16SfromHMM.py
def best_model(seq2hmm):
    """
    determine the best model: archaea, bacteria, eukarya (best score)
    """
    for seq in seq2hmm:
        best = []
        for model in seq2hmm[seq]:
            best.append([model, sorted([i[-1] for i in seq2hmm[seq][model]],
                                       reverse=True)[0]])
        best_model = sorted(best, key=itemgetter(1), reverse=True)[0][0]
        seq2hmm[seq] = [best_model] + [seq2hmm[seq][best_model]]
    return seq2hmm
[ "determine", "the", "best", "model", ":", "archaea", "bacteria", "eukarya", "(", "best", "score", ")" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L21-L31
[ "def", "best_model", "(", "seq2hmm", ")", ":", "for", "seq", "in", "seq2hmm", ":", "best", "=", "[", "]", "for", "model", "in", "seq2hmm", "[", "seq", "]", ":", "best", ".", "append", "(", "[", "model", ",", "sorted", "(", "[", "i", "[", "-", "...
83b2566b3a5745437ec651cd6cafddd056846240
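A tiny worked example of the shape best_model operates on (values are made up; the function only relies on each hit ending with its bit score):

seq2hmm = {'contig_1': {'bacteria': [['hitA', 750.2], ['hitB', 420.0]],
                        'archaea':  [['hitC', 610.0]]}}
best_model(seq2hmm)
# -> {'contig_1': ['bacteria', [['hitA', 750.2], ['hitB', 420.0]]]}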
train
check_gaps
check for large gaps between alignment windows
ctbBio/16SfromHMM.py
def check_gaps(matches, gap_threshold=0):
    """
    check for large gaps between alignment windows
    """
    gaps = []
    prev = None
    for match in sorted(matches, key=itemgetter(0)):
        if prev is None:
            prev = match
            continue
        if match[0] - prev[1] >= gap_threshold:
            gaps.append([prev, match])
        prev = match
    return [[i[0][1], i[1][0]] for i in gaps]
[ "check", "for", "large", "gaps", "between", "alignment", "windows" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L33-L46
[ "def", "check_gaps", "(", "matches", ",", "gap_threshold", "=", "0", ")", ":", "gaps", "=", "[", "]", "prev", "=", "None", "for", "match", "in", "sorted", "(", "matches", ",", "key", "=", "itemgetter", "(", "0", ")", ")", ":", "if", "prev", "is", ...
83b2566b3a5745437ec651cd6cafddd056846240
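A quick worked example for check_gaps (coordinates are made up): two alignment windows 50 positions apart yield one reported gap spanning the intervening region.

matches = [[1, 100], [150, 250]]       # illustrative alignment windows
check_gaps(matches, gap_threshold=50)
# -> [[100, 150]]  (end of the first window, start of the next)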
train
check_overlap
determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene
ctbBio/16SfromHMM.py
def check_overlap(current, hit, overlap=200):
    """
    determine if sequence has already hit the same part of the model,
    indicating that this hit is for another 16S rRNA gene
    """
    for prev in current:
        p_coords = prev[2:4]
        coords = hit[2:4]
        if get_overlap(coords, p_coords) >= overlap:
            return True
    return False
[ "determine", "if", "sequence", "has", "already", "hit", "the", "same", "part", "of", "the", "model", "indicating", "that", "this", "hit", "is", "for", "another", "16S", "rRNA", "gene" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L51-L61
[ "def", "check_overlap", "(", "current", ",", "hit", ",", "overlap", "=", "200", ")", ":", "for", "prev", "in", "current", ":", "p_coords", "=", "prev", "[", "2", ":", "4", "]", "coords", "=", "hit", "[", "2", ":", "4", "]", "if", "get_overlap", "...
83b2566b3a5745437ec651cd6cafddd056846240
train
check_order
determine if hits are sequential on model and on the same strand
* if not, they should be split into different groups
ctbBio/16SfromHMM.py
def check_order(current, hit, overlap=200):
    """
    determine if hits are sequential on model and on the same strand
    * if not, they should be split into different groups
    """
    prev_model = current[-1][2:4]
    prev_strand = current[-1][-2]
    hit_model = hit[2:4]
    hit_strand = hit[-2]
    # make sure they are on the same strand
    if prev_strand != hit_strand:
        return False
    # check for sequential hits on + strand
    if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap):
        return False
    # check for sequential hits on - strand
    if prev_strand == '-' and (hit_model[1] - prev_model[0] >= overlap):
        return False
    else:
        return True
[ "determine", "if", "hits", "are", "sequential", "on", "model", "and", "on", "the", "same", "strand", "*", "if", "not", "they", "should", "be", "split", "into", "different", "groups" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L63-L83
[ "def", "check_order", "(", "current", ",", "hit", ",", "overlap", "=", "200", ")", ":", "prev_model", "=", "current", "[", "-", "1", "]", "[", "2", ":", "4", "]", "prev_strand", "=", "current", "[", "-", "1", "]", "[", "-", "2", "]", "hit_model",...
83b2566b3a5745437ec651cd6cafddd056846240
train
hit_groups
* each sequence may have more than one 16S rRNA gene
* group hits for each gene
ctbBio/16SfromHMM.py
def hit_groups(hits):
    """
    * each sequence may have more than one 16S rRNA gene
    * group hits for each gene
    """
    groups = []
    current = False
    for hit in sorted(hits, key=itemgetter(0)):
        if current is False:
            current = [hit]
        elif check_overlap(current, hit) is True \
                or check_order(current, hit) is False:
            groups.append(current)
            current = [hit]
        else:
            current.append(hit)
    groups.append(current)
    return groups
[ "*", "each", "sequence", "may", "have", "more", "than", "one", "16S", "rRNA", "gene", "*", "group", "hits", "for", "each", "gene" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L85-L101
[ "def", "hit_groups", "(", "hits", ")", ":", "groups", "=", "[", "]", "current", "=", "False", "for", "hit", "in", "sorted", "(", "hits", ",", "key", "=", "itemgetter", "(", "0", ")", ")", ":", "if", "current", "is", "False", ":", "current", "=", ...
83b2566b3a5745437ec651cd6cafddd056846240
train
find_coordinates
find 16S rRNA gene sequence coordinates
ctbBio/16SfromHMM.py
def find_coordinates(hmms, bit_thresh):
    """
    find 16S rRNA gene sequence coordinates
    """
    # get coordinates from cmsearch output
    seq2hmm = parse_hmm(hmms, bit_thresh)
    seq2hmm = best_model(seq2hmm)
    # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
    group2hmm = {}
    for seq, info in list(seq2hmm.items()):
        group2hmm[seq] = {}
        # info = [model, [[hit1], [hit2], ...]]
        for group_num, group in enumerate(hit_groups(info[1])):
            # group is a group of hits to a single 16S gene
            # determine matching strand based on best hit
            best = sorted(group, reverse=True, key=itemgetter(-1))[0]
            strand = best[5]
            coordinates = [i[0] for i in group] + [i[1] for i in group]
            coordinates = [min(coordinates), max(coordinates), strand]
            # make sure all hits are to the same strand
            matches = [i for i in group if i[5] == strand]
            # gaps = [[gstart, gend], [gstart2, gend2]]
            gaps = check_gaps(matches)
            group2hmm[seq][group_num] = [info[0], strand, coordinates,
                                         matches, gaps]
    return group2hmm
[ "find", "16S", "rRNA", "gene", "sequence", "coordinates" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L103-L126
[ "def", "find_coordinates", "(", "hmms", ",", "bit_thresh", ")", ":", "# get coordinates from cmsearch output", "seq2hmm", "=", "parse_hmm", "(", "hmms", ",", "bit_thresh", ")", "seq2hmm", "=", "best_model", "(", "seq2hmm", ")", "group2hmm", "=", "{", "}", "# gro...
83b2566b3a5745437ec651cd6cafddd056846240
train
get_info
get info from either ssu-cmsearch or cmsearch output
ctbBio/16SfromHMM.py
def get_info(line, bit_thresh):
    """
    get info from either ssu-cmsearch or cmsearch output
    """
    if len(line) >= 18:
        # output is from cmsearch
        id, model, bit, inc = line[0].split()[0], line[2], float(line[14]), line[16]
        sstart, send, strand = int(line[7]), int(line[8]), line[9]
        mstart, mend = int(line[5]), int(line[6])
    elif len(line) == 9:
        # output is from ssu-cmsearch
        if bit_thresh == 0:
            print('# ssu-cmsearch does not include a model-specific inclusion threshold, ', file=sys.stderr)
            print('# please specify a bit score threshold', file=sys.stderr)
            exit()
        id, model, bit = line[1].split()[0], line[0], float(line[6])
        inc = '!'  # this is not a feature of ssu-cmsearch
        sstart, send = int(line[2]), int(line[3])
        mstart, mend = int(4), int(5)
        if send >= sstart:
            strand = '+'
        else:
            strand = '-'
    else:
        print('# unsupported hmm format:', file=sys.stderr)
        print('# provide tabular output from ssu-cmsearch and cmsearch supported', file=sys.stderr)
        exit()
    coords = [sstart, send]
    sstart, send = min(coords), max(coords)
    mcoords = [mstart, mend]
    mstart, mend = min(mcoords), max(mcoords)
    return id, model, bit, sstart, send, mstart, mend, strand, inc
[ "get", "info", "from", "either", "ssu", "-", "cmsearch", "or", "cmsearch", "output" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L128-L157
[ "def", "get_info", "(", "line", ",", "bit_thresh", ")", ":", "if", "len", "(", "line", ")", ">=", "18", ":", "# output is from cmsearch", "id", ",", "model", ",", "bit", ",", "inc", "=", "line", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]...
83b2566b3a5745437ec651cd6cafddd056846240
train
check_buffer
check to see how much of the buffer is being used
ctbBio/16SfromHMM.py
def check_buffer(coords, length, buffer):
    """
    check to see how much of the buffer is being used
    """
    s = min(coords[0], buffer)
    e = min(length - coords[1], buffer)
    return [s, e]
[ "check", "to", "see", "how", "much", "of", "the", "buffer", "is", "being", "used" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L189-L195
[ "def", "check_buffer", "(", "coords", ",", "length", ",", "buffer", ")", ":", "s", "=", "min", "(", "coords", "[", "0", "]", ",", "buffer", ")", "e", "=", "min", "(", "length", "-", "coords", "[", "1", "]", ",", "buffer", ")", "return", "[", "s...
83b2566b3a5745437ec651cd6cafddd056846240
train
convert_parser_to
:return: a parser of type parser_or_type, initialized with the properties of parser. If parser_or_type is a type, an instance of it must contain an update method. The update method must also process the set of properties supported by MetadataParser for the conversion to have any effect.
:param parser: the parser (or content or parser type) to convert to new_type
:param parser_or_type: a parser (or content) or type of parser to return
:see: get_metadata_parser(metadata_container) for more on how parser_or_type is treated
gis_metadata/metadata_parser.py
def convert_parser_to(parser, parser_or_type, metadata_props=None):
    """
    :return: a parser of type parser_or_type, initialized with the properties
        of parser. If parser_or_type is a type, an instance of it must contain
        an update method. The update method must also process the set of
        properties supported by MetadataParser for the conversion to have any
        effect.
    :param parser: the parser (or content or parser type) to convert to new_type
    :param parser_or_type: a parser (or content) or type of parser to return
    :see: get_metadata_parser(metadata_container) for more on how
        parser_or_type is treated
    """
    old_parser = parser if isinstance(parser, MetadataParser) else get_metadata_parser(parser)
    new_parser = get_metadata_parser(parser_or_type)

    for prop in (metadata_props or _supported_props):
        setattr(new_parser, prop, deepcopy(getattr(old_parser, prop, u'')))

    new_parser.update()

    return new_parser
[ ":", "return", ":", "a", "parser", "of", "type", "parser_or_type", "initialized", "with", "the", "properties", "of", "parser", ".", "If", "parser_or_type", "is", "a", "type", "an", "instance", "of", "it", "must", "contain", "a", "update", "method", ".", "T...
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L30-L48
[ "def", "convert_parser_to", "(", "parser", ",", "parser_or_type", ",", "metadata_props", "=", "None", ")", ":", "old_parser", "=", "parser", "if", "isinstance", "(", "parser", ",", "MetadataParser", ")", "else", "get_metadata_parser", "(", "parser", ")", "new_pa...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
get_metadata_parser
Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metadata_content) for more on types of content that can be parsed
gis_metadata/metadata_parser.py
def get_metadata_parser(metadata_container, **metadata_defaults): """ Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metdata_content) for more on types of content that can be parsed """ parser_type = None if isinstance(metadata_container, MetadataParser): parser_type = type(metadata_container) elif isinstance(metadata_container, type): parser_type = metadata_container metadata_container = metadata_container().update(**metadata_defaults) xml_root, xml_tree = get_parsed_content(metadata_container) # The get_parsed_content method ensures only these roots will be returned parser = None if parser_type is not None: parser = parser_type(xml_tree, **metadata_defaults) elif xml_root in ISO_ROOTS: parser = IsoParser(xml_tree, **metadata_defaults) else: has_arcgis_data = any(element_exists(xml_tree, e) for e in ARCGIS_NODES) if xml_root == FGDC_ROOT and not has_arcgis_data: parser = FgdcParser(xml_tree, **metadata_defaults) elif xml_root in ARCGIS_ROOTS: parser = ArcGISParser(xml_tree, **metadata_defaults) return parser
def get_metadata_parser(metadata_container, **metadata_defaults): """ Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metdata_content) for more on types of content that can be parsed """ parser_type = None if isinstance(metadata_container, MetadataParser): parser_type = type(metadata_container) elif isinstance(metadata_container, type): parser_type = metadata_container metadata_container = metadata_container().update(**metadata_defaults) xml_root, xml_tree = get_parsed_content(metadata_container) # The get_parsed_content method ensures only these roots will be returned parser = None if parser_type is not None: parser = parser_type(xml_tree, **metadata_defaults) elif xml_root in ISO_ROOTS: parser = IsoParser(xml_tree, **metadata_defaults) else: has_arcgis_data = any(element_exists(xml_tree, e) for e in ARCGIS_NODES) if xml_root == FGDC_ROOT and not has_arcgis_data: parser = FgdcParser(xml_tree, **metadata_defaults) elif xml_root in ARCGIS_ROOTS: parser = ArcGISParser(xml_tree, **metadata_defaults) return parser
[ "Takes", "a", "metadata_container", "which", "may", "be", "a", "type", "or", "instance", "of", "a", "parser", "a", "dict", "string", "or", "file", ".", ":", "return", ":", "a", "new", "instance", "of", "a", "parser", "corresponding", "to", "the", "standa...
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L51-L85
[ "def", "get_metadata_parser", "(", "metadata_container", ",", "*", "*", "metadata_defaults", ")", ":", "parser_type", "=", "None", "if", "isinstance", "(", "metadata_container", ",", "MetadataParser", ")", ":", "parser_type", "=", "type", "(", "metadata_container", ...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
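A minimal end-to-end sketch tying together get_metadata_parser and convert_parser_to from the records above. The file names are hypothetical; the imports follow the module paths given in the records, and the write call uses the MetadataParser.write signature shown further down.

from gis_metadata.metadata_parser import convert_parser_to, get_metadata_parser
from gis_metadata.iso_metadata_parser import IsoParser

with open('fgdc_metadata.xml') as metadata_file:       # hypothetical FGDC input file
    parser = get_metadata_parser(metadata_file)        # the root element decides FGDC / ISO / ArcGIS

iso_parser = convert_parser_to(parser, IsoParser)      # copies supported properties, then calls update()
iso_parser.write(out_file_or_path='iso_metadata.xml')  # hypothetical output path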
train
get_parsed_content
Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils
gis_metadata/metadata_parser.py
def get_parsed_content(metadata_content): """ Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils """ _import_parsers() # Prevents circular dependencies between modules xml_tree = None if metadata_content is None: raise NoContent('Metadata has no data') else: if isinstance(metadata_content, MetadataParser): xml_tree = deepcopy(metadata_content._xml_tree) elif isinstance(metadata_content, dict): xml_tree = get_element_tree(metadata_content) else: try: # Strip name spaces from file or XML content xml_tree = get_element_tree(metadata_content) except Exception: xml_tree = None # Several exceptions possible, outcome is the same if xml_tree is None: raise InvalidContent( 'Cannot instantiate a {parser_type} parser with invalid content to parse', parser_type=type(metadata_content).__name__ ) xml_root = get_element_name(xml_tree) if xml_root is None: raise NoContent('Metadata contains no data') elif xml_root not in VALID_ROOTS: content = type(metadata_content).__name__ raise InvalidContent('Invalid root element for {content}: {xml_root}', content=content, xml_root=xml_root) return xml_root, xml_tree
def get_parsed_content(metadata_content): """ Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils """ _import_parsers() # Prevents circular dependencies between modules xml_tree = None if metadata_content is None: raise NoContent('Metadata has no data') else: if isinstance(metadata_content, MetadataParser): xml_tree = deepcopy(metadata_content._xml_tree) elif isinstance(metadata_content, dict): xml_tree = get_element_tree(metadata_content) else: try: # Strip name spaces from file or XML content xml_tree = get_element_tree(metadata_content) except Exception: xml_tree = None # Several exceptions possible, outcome is the same if xml_tree is None: raise InvalidContent( 'Cannot instantiate a {parser_type} parser with invalid content to parse', parser_type=type(metadata_content).__name__ ) xml_root = get_element_name(xml_tree) if xml_root is None: raise NoContent('Metadata contains no data') elif xml_root not in VALID_ROOTS: content = type(metadata_content).__name__ raise InvalidContent('Invalid root element for {content}: {xml_root}', content=content, xml_root=xml_root) return xml_root, xml_tree
[ "Parses", "any", "of", "the", "following", "types", "of", "content", ":", "1", ".", "XML", "string", "or", "file", "object", ":", "parses", "XML", "content", "2", ".", "MetadataParser", "instance", ":", "deep", "copies", "xml_tree", "3", ".", "Dictionary",...
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L88-L138
[ "def", "get_parsed_content", "(", "metadata_content", ")", ":", "_import_parsers", "(", ")", "# Prevents circular dependencies between modules", "xml_tree", "=", "None", "if", "metadata_content", "is", "None", ":", "raise", "NoContent", "(", "'Metadata has no data'", ")",...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
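A small sketch of the dictionary form accepted by get_parsed_content, per the docstring above; it assumes 'metadata' (the FGDC root element) is among the roots the library accepts.

from gis_metadata.metadata_parser import get_parsed_content

tree_dict = {
    'name': 'metadata',   # required: the root element tag
    'children': [{'name': 'idinfo', 'children': [{'name': 'descript'}]}],
}
xml_root, xml_tree = get_parsed_content(tree_dict)
print(xml_root)  # -> 'metadata' if that root is supported; otherwise InvalidContent is raised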
train
_import_parsers
Lazy imports to prevent circular dependencies between this module and utils
gis_metadata/metadata_parser.py
def _import_parsers(): """ Lazy imports to prevent circular dependencies between this module and utils """ global ARCGIS_NODES global ARCGIS_ROOTS global ArcGISParser global FGDC_ROOT global FgdcParser global ISO_ROOTS global IsoParser global VALID_ROOTS if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None: from gis_metadata.arcgis_metadata_parser import ARCGIS_NODES from gis_metadata.arcgis_metadata_parser import ARCGIS_ROOTS from gis_metadata.arcgis_metadata_parser import ArcGISParser if FGDC_ROOT is None or FgdcParser is None: from gis_metadata.fgdc_metadata_parser import FGDC_ROOT from gis_metadata.fgdc_metadata_parser import FgdcParser if ISO_ROOTS is None or IsoParser is None: from gis_metadata.iso_metadata_parser import ISO_ROOTS from gis_metadata.iso_metadata_parser import IsoParser if VALID_ROOTS is None: VALID_ROOTS = {FGDC_ROOT}.union(ARCGIS_ROOTS + ISO_ROOTS)
def _import_parsers(): """ Lazy imports to prevent circular dependencies between this module and utils """ global ARCGIS_NODES global ARCGIS_ROOTS global ArcGISParser global FGDC_ROOT global FgdcParser global ISO_ROOTS global IsoParser global VALID_ROOTS if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None: from gis_metadata.arcgis_metadata_parser import ARCGIS_NODES from gis_metadata.arcgis_metadata_parser import ARCGIS_ROOTS from gis_metadata.arcgis_metadata_parser import ArcGISParser if FGDC_ROOT is None or FgdcParser is None: from gis_metadata.fgdc_metadata_parser import FGDC_ROOT from gis_metadata.fgdc_metadata_parser import FgdcParser if ISO_ROOTS is None or IsoParser is None: from gis_metadata.iso_metadata_parser import ISO_ROOTS from gis_metadata.iso_metadata_parser import IsoParser if VALID_ROOTS is None: VALID_ROOTS = {FGDC_ROOT}.union(ARCGIS_ROOTS + ISO_ROOTS)
[ "Lazy", "imports", "to", "prevent", "circular", "dependencies", "between", "this", "module", "and", "utils" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L141-L170
[ "def", "_import_parsers", "(", ")", ":", "global", "ARCGIS_NODES", "global", "ARCGIS_ROOTS", "global", "ArcGISParser", "global", "FGDC_ROOT", "global", "FgdcParser", "global", "ISO_ROOTS", "global", "IsoParser", "global", "VALID_ROOTS", "if", "ARCGIS_NODES", "is", "No...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._init_metadata
Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value.
gis_metadata/metadata_parser.py
def _init_metadata(self): """ Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value. """ if self._data_map is None: self._init_data_map() validate_properties(self._data_map, self._metadata_props) # Parse attribute values and assign them: key = parse(val) for prop in self._data_map: setattr(self, prop, parse_property(self._xml_tree, None, self._data_map, prop)) self.has_data = any(getattr(self, prop) for prop in self._data_map)
def _init_metadata(self): """ Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value. """ if self._data_map is None: self._init_data_map() validate_properties(self._data_map, self._metadata_props) # Parse attribute values and assign them: key = parse(val) for prop in self._data_map: setattr(self, prop, parse_property(self._xml_tree, None, self._data_map, prop)) self.has_data = any(getattr(self, prop) for prop in self._data_map)
[ "Dynamically", "sets", "attributes", "from", "a", "Dictionary", "passed", "in", "by", "children", ".", "The", "Dictionary", "will", "contain", "the", "name", "of", "each", "attribute", "as", "keys", "and", "either", "an", "XPATH", "mapping", "to", "a", "text...
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L236-L254
[ "def", "_init_metadata", "(", "self", ")", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_init_data_map", "(", ")", "validate_properties", "(", "self", ".", "_data_map", ",", "self", ".", "_metadata_props", ")", "# Parse attribute value...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._init_data_map
Default data map initialization: MUST be overridden in children
gis_metadata/metadata_parser.py
def _init_data_map(self): """ Default data map initialization: MUST be overridden in children """ if self._data_map is None: self._data_map = {'_root': None} self._data_map.update({}.fromkeys(self._metadata_props))
def _init_data_map(self): """ Default data map initialization: MUST be overridden in children """ if self._data_map is None: self._data_map = {'_root': None} self._data_map.update({}.fromkeys(self._metadata_props))
[ "Default", "data", "map", "initialization", ":", "MUST", "be", "overridden", "in", "children" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L256-L261
[ "def", "_init_data_map", "(", "self", ")", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_data_map", "=", "{", "'_root'", ":", "None", "}", "self", ".", "_data_map", ".", "update", "(", "{", "}", ".", "fromkeys", "(", "self", ...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._get_template
Iterate over metadata_defaults items {prop: val, ...} to populate the template
gis_metadata/metadata_parser.py
def _get_template(self, root=None, **metadata_defaults): """ Iterate over items metadata_defaults {prop: val, ...} to populate template """ if root is None: if self._data_map is None: self._init_data_map() root = self._xml_root = self._data_map['_root'] template_tree = self._xml_tree = create_element_tree(root) for prop, val in iteritems(metadata_defaults): path = self._data_map.get(prop) if path and val: setattr(self, prop, val) update_property(template_tree, None, path, prop, val) return template_tree
def _get_template(self, root=None, **metadata_defaults): """ Iterate over items metadata_defaults {prop: val, ...} to populate template """ if root is None: if self._data_map is None: self._init_data_map() root = self._xml_root = self._data_map['_root'] template_tree = self._xml_tree = create_element_tree(root) for prop, val in iteritems(metadata_defaults): path = self._data_map.get(prop) if path and val: setattr(self, prop, val) update_property(template_tree, None, path, prop, val) return template_tree
[ "Iterate", "over", "items", "metadata_defaults", "{", "prop", ":", "val", "...", "}", "to", "populate", "template" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L263-L280
[ "def", "_get_template", "(", "self", ",", "root", "=", "None", ",", "*", "*", "metadata_defaults", ")", ":", "if", "root", "is", "None", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_init_data_map", "(", ")", "root", "=", "se...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._get_xpath_for
:return: the configured xpath for a given property
gis_metadata/metadata_parser.py
def _get_xpath_for(self, prop): """ :return: the configured xpath for a given property """ xpath = self._data_map.get(prop) return getattr(xpath, 'xpath', xpath)
def _get_xpath_for(self, prop): """ :return: the configured xpath for a given property """ xpath = self._data_map.get(prop) return getattr(xpath, 'xpath', xpath)
[ ":", "return", ":", "the", "configured", "xpath", "for", "a", "given", "property" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L282-L286
[ "def", "_get_xpath_for", "(", "self", ",", "prop", ")", ":", "xpath", "=", "self", ".", "_data_map", ".", "get", "(", "prop", ")", "return", "getattr", "(", "xpath", ",", "'xpath'", ",", "xpath", ")" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._parse_complex
Default parsing operation for a complex struct
gis_metadata/metadata_parser.py
def _parse_complex(self, prop): """ Default parsing operation for a complex struct """ xpath_root = None xpath_map = self._data_structures[prop] return parse_complex(self._xml_tree, xpath_root, xpath_map, prop)
def _parse_complex(self, prop): """ Default parsing operation for a complex struct """ xpath_root = None xpath_map = self._data_structures[prop] return parse_complex(self._xml_tree, xpath_root, xpath_map, prop)
[ "Default", "parsing", "operation", "for", "a", "complex", "struct" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L293-L299
[ "def", "_parse_complex", "(", "self", ",", "prop", ")", ":", "xpath_root", "=", "None", "xpath_map", "=", "self", ".", "_data_structures", "[", "prop", "]", "return", "parse_complex", "(", "self", ".", "_xml_tree", ",", "xpath_root", ",", "xpath_map", ",", ...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._parse_complex_list
Default parsing operation for lists of complex structs
gis_metadata/metadata_parser.py
def _parse_complex_list(self, prop): """ Default parsing operation for lists of complex structs """ xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
def _parse_complex_list(self, prop): """ Default parsing operation for lists of complex structs """ xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
[ "Default", "parsing", "operation", "for", "lists", "of", "complex", "structs" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L301-L307
[ "def", "_parse_complex_list", "(", "self", ",", "prop", ")", ":", "xpath_root", "=", "self", ".", "_get_xroot_for", "(", "prop", ")", "xpath_map", "=", "self", ".", "_data_structures", "[", "prop", "]", "return", "parse_complex_list", "(", "self", ".", "_xml...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._parse_dates
Creates and returns a Date Types data structure parsed from the metadata
gis_metadata/metadata_parser.py
def _parse_dates(self, prop=DATES): """ Creates and returns a Date Types data structure parsed from the metadata """ return parse_dates(self._xml_tree, self._data_structures[prop])
def _parse_dates(self, prop=DATES): """ Creates and returns a Date Types data structure parsed from the metadata """ return parse_dates(self._xml_tree, self._data_structures[prop])
[ "Creates", "and", "returns", "a", "Date", "Types", "data", "structure", "parsed", "from", "the", "metadata" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L309-L312
[ "def", "_parse_dates", "(", "self", ",", "prop", "=", "DATES", ")", ":", "return", "parse_dates", "(", "self", ".", "_xml_tree", ",", "self", ".", "_data_structures", "[", "prop", "]", ")" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._update_complex
Default update operation for a complex struct
gis_metadata/metadata_parser.py
def _update_complex(self, **update_props): """ Default update operation for a complex struct """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
def _update_complex(self, **update_props): """ Default update operation for a complex struct """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
[ "Default", "update", "operation", "for", "a", "complex", "struct" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L314-L321
[ "def", "_update_complex", "(", "self", ",", "*", "*", "update_props", ")", ":", "prop", "=", "update_props", "[", "'prop'", "]", "xpath_root", "=", "self", ".", "_get_xroot_for", "(", "prop", ")", "xpath_map", "=", "self", ".", "_data_structures", "[", "pr...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._update_complex_list
Default update operation for lists of complex structs
gis_metadata/metadata_parser.py
def _update_complex_list(self, **update_props): """ Default update operation for lists of complex structs """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
def _update_complex_list(self, **update_props): """ Default update operation for lists of complex structs """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
[ "Default", "update", "operation", "for", "lists", "of", "complex", "structs" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L323-L330
[ "def", "_update_complex_list", "(", "self", ",", "*", "*", "update_props", ")", ":", "prop", "=", "update_props", "[", "'prop'", "]", "xpath_root", "=", "self", ".", "_get_xroot_for", "(", "prop", ")", "xpath_map", "=", "self", ".", "_data_structures", "[", ...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser._update_dates
Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES]
gis_metadata/metadata_parser.py
def _update_dates(self, xpath_root=None, **update_props): """ Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES] """ tree_to_update = update_props['tree_to_update'] prop = update_props['prop'] values = (update_props['values'] or {}).get(DATE_VALUES) or u'' xpaths = self._data_structures[prop] if not self.dates: date_xpaths = xpath_root elif self.dates[DATE_TYPE] != DATE_TYPE_RANGE: date_xpaths = xpaths.get(self.dates[DATE_TYPE], u'') else: date_xpaths = [ xpaths[DATE_TYPE_RANGE_BEGIN], xpaths[DATE_TYPE_RANGE_END] ] if xpath_root: remove_element(tree_to_update, xpath_root) return update_property(tree_to_update, xpath_root, date_xpaths, prop, values)
def _update_dates(self, xpath_root=None, **update_props): """ Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES] """ tree_to_update = update_props['tree_to_update'] prop = update_props['prop'] values = (update_props['values'] or {}).get(DATE_VALUES) or u'' xpaths = self._data_structures[prop] if not self.dates: date_xpaths = xpath_root elif self.dates[DATE_TYPE] != DATE_TYPE_RANGE: date_xpaths = xpaths.get(self.dates[DATE_TYPE], u'') else: date_xpaths = [ xpaths[DATE_TYPE_RANGE_BEGIN], xpaths[DATE_TYPE_RANGE_END] ] if xpath_root: remove_element(tree_to_update, xpath_root) return update_property(tree_to_update, xpath_root, date_xpaths, prop, values)
[ "Default", "update", "operation", "for", "Dates", "metadata", ":", "see", ":", "gis_metadata", ".", "utils", ".", "_complex_definitions", "[", "DATES", "]" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L332-L356
[ "def", "_update_dates", "(", "self", ",", "xpath_root", "=", "None", ",", "*", "*", "update_props", ")", ":", "tree_to_update", "=", "update_props", "[", "'tree_to_update'", "]", "prop", "=", "update_props", "[", "'prop'", "]", "values", "=", "(", "update_pr...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser.write
Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8
gis_metadata/metadata_parser.py
def write(self, use_template=False, out_file_or_path=None, encoding=DEFAULT_ENCODING): """ Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8 """ if not out_file_or_path: out_file_or_path = self.out_file_or_path if not out_file_or_path: # FileNotFoundError doesn't exist in Python 2 raise IOError('Output file path has not been provided') write_element(self.update(use_template), out_file_or_path, encoding)
def write(self, use_template=False, out_file_or_path=None, encoding=DEFAULT_ENCODING): """ Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8 """ if not out_file_or_path: out_file_or_path = self.out_file_or_path if not out_file_or_path: # FileNotFoundError doesn't exist in Python 2 raise IOError('Output file path has not been provided') write_element(self.update(use_template), out_file_or_path, encoding)
[ "Validates", "instance", "properties", "updates", "an", "XML", "tree", "with", "them", "and", "writes", "the", "content", "to", "a", "file", ".", ":", "param", "use_template", ":", "if", "True", "updates", "a", "new", "template", "XML", "tree", ";", "other...
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L373-L388
[ "def", "write", "(", "self", ",", "use_template", "=", "False", ",", "out_file_or_path", "=", "None", ",", "encoding", "=", "DEFAULT_ENCODING", ")", ":", "if", "not", "out_file_or_path", ":", "out_file_or_path", "=", "self", ".", "out_file_or_path", "if", "not...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
MetadataParser.validate
Default validation for updated properties: MAY be overridden in children
gis_metadata/metadata_parser.py
def validate(self): """ Default validation for updated properties: MAY be overridden in children """ validate_properties(self._data_map, self._metadata_props) for prop in self._data_map: validate_any(prop, getattr(self, prop), self._data_structures.get(prop)) return self
def validate(self): """ Default validation for updated properties: MAY be overridden in children """ validate_properties(self._data_map, self._metadata_props) for prop in self._data_map: validate_any(prop, getattr(self, prop), self._data_structures.get(prop)) return self
[ "Default", "validation", "for", "updated", "properties", ":", "MAY", "be", "overridden", "in", "children" ]
consbio/gis-metadata-parser
python
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L410-L418
[ "def", "validate", "(", "self", ")", ":", "validate_properties", "(", "self", ".", "_data_map", ",", "self", ".", "_metadata_props", ")", "for", "prop", "in", "self", ".", "_data_map", ":", "validate_any", "(", "prop", ",", "getattr", "(", "self", ",", "...
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
train
_search_regex
Search order: * specified regexps * operators sorted from longer to shorter
sugartex/sugartex_filter.py
def _search_regex(ops: dict, regex_pat: str): """ Search order: * specified regexps * operators sorted from longer to shorter """ custom_regexps = list(filter(None, [dic['regex'] for op, dic in ops.items() if 'regex' in dic])) op_names = [op for op, dic in ops.items() if 'regex' not in dic] regex = [regex_pat.format(_ops_regex(op_names))] if len(op_names) > 0 else [] return re.compile('|'.join(custom_regexps + regex))
def _search_regex(ops: dict, regex_pat: str): """ Search order: * specified regexps * operators sorted from longer to shorter """ custom_regexps = list(filter(None, [dic['regex'] for op, dic in ops.items() if 'regex' in dic])) op_names = [op for op, dic in ops.items() if 'regex' not in dic] regex = [regex_pat.format(_ops_regex(op_names))] if len(op_names) > 0 else [] return re.compile('|'.join(custom_regexps + regex))
[ "Search", "order", ":", "*", "specified", "regexps", "*", "operators", "sorted", "from", "longer", "to", "shorter" ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L165-L174
[ "def", "_search_regex", "(", "ops", ":", "dict", ",", "regex_pat", ":", "str", ")", ":", "custom_regexps", "=", "list", "(", "filter", "(", "None", ",", "[", "dic", "[", "'regex'", "]", "for", "op", ",", "dic", "in", "ops", ".", "items", "(", ")", ...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
Styles.spec
Return prefix unary operators list
sugartex/sugartex_filter.py
def spec(self, postf_un_ops: str) -> list: """Return prefix unary operators list""" spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for op, pat in self.styles.items() for l, r in self.brackets] spec[0][1]['regex'] = self.regex_pat.format( _ops_regex(l for l, r in self.brackets), _ops_regex(self.styles.keys()) ) return spec
def spec(self, postf_un_ops: str) -> list: """Return prefix unary operators list""" spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for op, pat in self.styles.items() for l, r in self.brackets] spec[0][1]['regex'] = self.regex_pat.format( _ops_regex(l for l, r in self.brackets), _ops_regex(self.styles.keys()) ) return spec
[ "Return", "prefix", "unary", "operators", "list" ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L216-L227
[ "def", "spec", "(", "self", ",", "postf_un_ops", ":", "str", ")", "->", "list", ":", "spec", "=", "[", "(", "l", "+", "op", ",", "{", "'pat'", ":", "self", ".", "pat", "(", "pat", ")", ",", "'postf'", ":", "self", ".", "postf", "(", "r", ",",...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
PrefUnGreedy.spec
Returns prefix unary operators list. Sets only one regex for all items in the dict.
sugartex/sugartex_filter.py
def spec(self) -> list: """Returns prefix unary operators list. Sets only one regex for all items in the dict.""" spec = [item for op, pat in self.ops.items() for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}), ('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})] ] spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys())) return spec
def spec(self) -> list: """Returns prefix unary operators list. Sets only one regex for all items in the dict.""" spec = [item for op, pat in self.ops.items() for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}), ('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})] ] spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys())) return spec
[ "Returns", "prefix", "unary", "operators", "list", ".", "Sets", "only", "one", "regex", "for", "all", "items", "in", "the", "dict", "." ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L242-L251
[ "def", "spec", "(", "self", ")", "->", "list", ":", "spec", "=", "[", "item", "for", "op", ",", "pat", "in", "self", ".", "ops", ".", "items", "(", ")", "for", "item", "in", "[", "(", "'{'", "+", "op", ",", "{", "'pat'", ":", "pat", ",", "'...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
PrefUnOps.fill
Insert: * math styles * other styles * unary prefix operators without brackets * defaults
sugartex/sugartex_filter.py
def fill(self, postf_un_ops: str): """ Insert: * math styles * other styles * unary prefix operators without brackets * defaults """ for op, dic in self.ops.items(): if 'postf' not in dic: dic['postf'] = self.postf self.ops = OrderedDict( self.styles.spec(postf_un_ops) + self.other_styles.spec(postf_un_ops) + self.pref_un_greedy.spec() + list(self.ops.items()) ) for op, dic in self.ops.items(): dic['postf'] = re.compile(dic['postf']) self.regex = _search_regex(self.ops, self.regex_pat)
def fill(self, postf_un_ops: str): """ Insert: * math styles * other styles * unary prefix operators without brackets * defaults """ for op, dic in self.ops.items(): if 'postf' not in dic: dic['postf'] = self.postf self.ops = OrderedDict( self.styles.spec(postf_un_ops) + self.other_styles.spec(postf_un_ops) + self.pref_un_greedy.spec() + list(self.ops.items()) ) for op, dic in self.ops.items(): dic['postf'] = re.compile(dic['postf']) self.regex = _search_regex(self.ops, self.regex_pat)
[ "Insert", ":", "*", "math", "styles", "*", "other", "styles", "*", "unary", "prefix", "operators", "without", "brackets", "*", "defaults" ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L325-L344
[ "def", "fill", "(", "self", ",", "postf_un_ops", ":", "str", ")", ":", "for", "op", ",", "dic", "in", "self", ".", "ops", ".", "items", "(", ")", ":", "if", "'postf'", "not", "in", "dic", ":", "dic", "[", "'postf'", "]", "=", "self", ".", "post...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
PostfUnOps.one_symbol_ops_str
Regex-escaped string with all one-symbol operators
sugartex/sugartex_filter.py
def one_symbol_ops_str(self) -> str: """Regex-escaped string with all one-symbol operators""" return re.escape(''.join((key for key in self.ops.keys() if len(key) == 1)))
def one_symbol_ops_str(self) -> str: """Regex-escaped string with all one-symbol operators""" return re.escape(''.join((key for key in self.ops.keys() if len(key) == 1)))
[ "Regex", "-", "escaped", "string", "with", "all", "one", "-", "symbol", "operators" ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L389-L391
[ "def", "one_symbol_ops_str", "(", "self", ")", "->", "str", ":", "return", "re", ".", "escape", "(", "''", ".", "join", "(", "(", "key", "for", "key", "in", "self", ".", "ops", ".", "keys", "(", ")", "if", "len", "(", "key", ")", "==", "1", ")"...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
SugarTeX._su_scripts_regex
:return: [compiled regex, function]
sugartex/sugartex_filter.py
def _su_scripts_regex(self): """ :return: [compiled regex, function] """ sups = re.escape(''.join([k for k in self.superscripts.keys()])) subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' + r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format( su_=subs + sups, sub=subs, sup=sups) su_regex = re.compile(su_regex) def su_replace(m): esc, sub, root_sup, sup = m.groups() if esc is not None: return esc elif sub is not None: return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}' elif root_sup is not None: return ''.join([self.superscripts[c] for c in root_sup]) elif sup is not None: return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}' else: raise TypeError("Regex bug: this should never be reached") return [su_regex, su_replace]
def _su_scripts_regex(self): """ :return: [compiled regex, function] """ sups = re.escape(''.join([k for k in self.superscripts.keys()])) subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' + r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format( su_=subs + sups, sub=subs, sup=sups) su_regex = re.compile(su_regex) def su_replace(m): esc, sub, root_sup, sup = m.groups() if esc is not None: return esc elif sub is not None: return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}' elif root_sup is not None: return ''.join([self.superscripts[c] for c in root_sup]) elif sup is not None: return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}' else: raise TypeError("Regex bug: this should never be reached") return [su_regex, su_replace]
[ ":", "return", ":", "[", "compiled", "regex", "function", "]" ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L671-L696
[ "def", "_su_scripts_regex", "(", "self", ")", ":", "sups", "=", "re", ".", "escape", "(", "''", ".", "join", "(", "[", "k", "for", "k", "in", "self", ".", "superscripts", ".", "keys", "(", ")", "]", ")", ")", "subs", "=", "re", ".", "escape", "...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
SugarTeX._local_map
:param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2
sugartex/sugartex_filter.py
def _local_map(match, loc: str = 'lr') -> list: """ :param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2 """ s = match.string map_ = [None] * (len(s) + 2) if loc == 'l' or loc == 'lr': balance = 0 for i in reversed(range(0, match.start())): map_[i] = balance c, prev = s[i], (s[i - 1] if i > 0 else '') if (c == '}' or c == '˲') and prev != '\\': balance += 1 elif (c == '{' or c == '˱') and prev != '\\': balance -= 1 if balance < 0: break map_[-1] = balance if loc == 'r' or loc == 'lr': balance = 0 for i in range(match.end(), len(s)): map_[i] = balance c, prev = s[i], s[i - 1] if (c == '{' or c == '˱') and prev != '\\': balance += 1 elif (c == '}' or c == '˲') and prev != '\\': balance -= 1 if balance < 0: break map_[len(s)] = balance return map_
def _local_map(match, loc: str = 'lr') -> list: """ :param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2 """ s = match.string map_ = [None] * (len(s) + 2) if loc == 'l' or loc == 'lr': balance = 0 for i in reversed(range(0, match.start())): map_[i] = balance c, prev = s[i], (s[i - 1] if i > 0 else '') if (c == '}' or c == '˲') and prev != '\\': balance += 1 elif (c == '{' or c == '˱') and prev != '\\': balance -= 1 if balance < 0: break map_[-1] = balance if loc == 'r' or loc == 'lr': balance = 0 for i in range(match.end(), len(s)): map_[i] = balance c, prev = s[i], s[i - 1] if (c == '{' or c == '˱') and prev != '\\': balance += 1 elif (c == '}' or c == '˲') and prev != '\\': balance -= 1 if balance < 0: break map_[len(s)] = balance return map_
[ ":", "param", "match", ":", ":", "param", "loc", ":", "str", "l", "or", "r", "or", "lr", "turns", "on", "/", "off", "left", "/", "right", "local", "area", "calculation", ":", "return", ":", "list", "list", "of", "the", "same", "size", "as", "the", ...
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L708-L753
[ "def", "_local_map", "(", "match", ",", "loc", ":", "str", "=", "'lr'", ")", "->", "list", ":", "s", "=", "match", ".", "string", "map_", "=", "[", "None", "]", "*", "(", "len", "(", "s", ")", "+", "2", ")", "if", "loc", "==", "'l'", "or", ...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
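The _local_map docstring above is dense, so here is a standalone sketch of the left-hand brace-balance idea it describes (a simplification: it ignores the '˱'/'˲' brackets and backslash escapes that the real method also handles).

def left_balance_map(s, start):
    # balance of '}' minus '{' seen while walking left from `start`;
    # positions never visited (at or past `start`, or beyond an unmatched '{') stay None
    map_ = [None] * len(s)
    balance = 0
    for i in reversed(range(start)):
        map_[i] = balance
        if s[i] == '}':
            balance += 1
        elif s[i] == '{':
            balance -= 1
        if balance < 0:
            break
    return map_

print(left_balance_map('{a}/b', 3))  # -> [1, 1, 0, None, None] (cf. the `b}a` example in the docstring)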
train
SugarTeX._operators_replace
Searches for the first unary or binary operator (via self.op_regex, which has only one group that contains the operator), then replaces it (or escapes it if brackets do not match). Everything until: * a space ' ' * the begin/end of the string * a bracket from an outer scope (like '{a/b}': term1=a term2=b) is considered a term (contents of matching brackets '{}' are ignored). Attributes ---------- string: str string to replace
sugartex/sugartex_filter.py
def _operators_replace(self, string: str) -> str: """ Searches for first unary or binary operator (via self.op_regex that has only one group that contain operator) then replaces it (or escapes it if brackets do not match). Everything until: * space ' ' * begin/end of the string * bracket from outer scope (like '{a/b}': term1=a term2=b) is considered a term (contents of matching brackets '{}' are ignored). Attributes ---------- string: str string to replace """ # noinspection PyShadowingNames def replace(string: str, start: int, end: int, substring: str) -> str: return string[0:start] + substring + string[end:len(string)] # noinspection PyShadowingNames def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str: if isinstance(pat, str): return pat.format(*terms) else: return pat(terms) count = 0 def check(): nonlocal count count += 1 if count > self.max_while: raise RuntimeError('Presumably while loop is stuck') # noinspection PyShadowingNames def null_replace(match) -> str: regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] terms = regex_terms[1:] return sub_pat(self.null_ops.ops[op]['pat'], terms) string = self.null_ops.regex.sub(null_replace, string) for ops, loc in [(self.pref_un_ops, 'r'), (self.postf_un_ops, 'l'), (self.bin_centr_ops, 'lr')]: count = 0 match = ops.regex.search(string) while match: check() regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] loc_map = self._local_map(match, loc) lmatch, rmatch = None, None if loc == 'l' or loc == 'lr': for m in ops.ops[op]['pref'].finditer(string): if m.end() <= match.start() and loc_map[m.end() - 1] == 0: lmatch = m if lmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term1 = string[lmatch.end():match.start()] if loc == 'r' or loc == 'lr': for m in ops.ops[op]['postf'].finditer(string): if m.start() >= match.end() and loc_map[m.start()] == 0: rmatch = m break if rmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term2 = string[match.end():rmatch.start()] if loc == 'l': # noinspection PyUnboundLocalVariable terms = list(lmatch.groups()) + [term1] + regex_terms[1:] start, end = lmatch.start(), match.end() elif loc == 'r': # noinspection PyUnboundLocalVariable terms = regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = match.start(), rmatch.end() elif loc == 'lr': terms = list(lmatch.groups()) + [term1] + regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = lmatch.start(), rmatch.end() else: # this never happen terms = regex_terms[1:] start, end = match.start(), match.end() string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms)) match = ops.regex.search(string) return string
def _operators_replace(self, string: str) -> str: """ Searches for first unary or binary operator (via self.op_regex that has only one group that contain operator) then replaces it (or escapes it if brackets do not match). Everything until: * space ' ' * begin/end of the string * bracket from outer scope (like '{a/b}': term1=a term2=b) is considered a term (contents of matching brackets '{}' are ignored). Attributes ---------- string: str string to replace """ # noinspection PyShadowingNames def replace(string: str, start: int, end: int, substring: str) -> str: return string[0:start] + substring + string[end:len(string)] # noinspection PyShadowingNames def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str: if isinstance(pat, str): return pat.format(*terms) else: return pat(terms) count = 0 def check(): nonlocal count count += 1 if count > self.max_while: raise RuntimeError('Presumably while loop is stuck') # noinspection PyShadowingNames def null_replace(match) -> str: regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] terms = regex_terms[1:] return sub_pat(self.null_ops.ops[op]['pat'], terms) string = self.null_ops.regex.sub(null_replace, string) for ops, loc in [(self.pref_un_ops, 'r'), (self.postf_un_ops, 'l'), (self.bin_centr_ops, 'lr')]: count = 0 match = ops.regex.search(string) while match: check() regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] loc_map = self._local_map(match, loc) lmatch, rmatch = None, None if loc == 'l' or loc == 'lr': for m in ops.ops[op]['pref'].finditer(string): if m.end() <= match.start() and loc_map[m.end() - 1] == 0: lmatch = m if lmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term1 = string[lmatch.end():match.start()] if loc == 'r' or loc == 'lr': for m in ops.ops[op]['postf'].finditer(string): if m.start() >= match.end() and loc_map[m.start()] == 0: rmatch = m break if rmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term2 = string[match.end():rmatch.start()] if loc == 'l': # noinspection PyUnboundLocalVariable terms = list(lmatch.groups()) + [term1] + regex_terms[1:] start, end = lmatch.start(), match.end() elif loc == 'r': # noinspection PyUnboundLocalVariable terms = regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = match.start(), rmatch.end() elif loc == 'lr': terms = list(lmatch.groups()) + [term1] + regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = lmatch.start(), rmatch.end() else: # this never happen terms = regex_terms[1:] start, end = match.start(), match.end() string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms)) match = ops.regex.search(string) return string
[ "Searches", "for", "first", "unary", "or", "binary", "operator", "(", "via", "self", ".", "op_regex", "that", "has", "only", "one", "group", "that", "contain", "operator", ")", "then", "replaces", "it", "(", "or", "escapes", "it", "if", "brackets", "do", ...
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L755-L849
[ "def", "_operators_replace", "(", "self", ",", "string", ":", "str", ")", "->", "str", ":", "# noinspection PyShadowingNames", "def", "replace", "(", "string", ":", "str", ",", "start", ":", "int", ",", "end", ":", "int", ",", "substring", ":", "str", ")...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
SugarTeX.replace
Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string
sugartex/sugartex_filter.py
def replace(self, src: str) -> str: """ Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string """ if not self.readied: self.ready() # Brackets + simple pre replacements: src = self._dict_replace(self.simple_pre, src) # Superscripts and subscripts + pre regexps: for regex, replace in self.regex_pre: src = regex.sub(replace, src) # Unary and binary operators: src = self._operators_replace(src) # Loop regexps: src_prev = src for i in range(self.max_iter): for regex, replace in self.loop_regexps: src = regex.sub(replace, src) if src_prev == src: break else: src_prev = src # Post regexps: for regex, replace in self.regex_post: src = regex.sub(replace, src) # Simple post replacements: src = self._dict_replace(self.simple_post, src) # Escape characters: src = self.escapes_regex.sub(r'\1', src) return src
def replace(self, src: str) -> str: """ Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string """ if not self.readied: self.ready() # Brackets + simple pre replacements: src = self._dict_replace(self.simple_pre, src) # Superscripts and subscripts + pre regexps: for regex, replace in self.regex_pre: src = regex.sub(replace, src) # Unary and binary operators: src = self._operators_replace(src) # Loop regexps: src_prev = src for i in range(self.max_iter): for regex, replace in self.loop_regexps: src = regex.sub(replace, src) if src_prev == src: break else: src_prev = src # Post regexps: for regex, replace in self.regex_post: src = regex.sub(replace, src) # Simple post replacements: src = self._dict_replace(self.simple_post, src) # Escape characters: src = self.escapes_regex.sub(r'\1', src) return src
[ "Extends", "LaTeX", "syntax", "via", "regex", "preprocess", ":", "param", "src", ":", "str", "LaTeX", "string", ":", "return", ":", "str", "New", "LaTeX", "string" ]
kiwi0fruit/sugartex
python
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L863-L904
[ "def", "replace", "(", "self", ",", "src", ":", "str", ")", "->", "str", ":", "if", "not", "self", ".", "readied", ":", "self", ".", "ready", "(", ")", "# Brackets + simple pre replacements:", "src", "=", "self", ".", "_dict_replace", "(", "self", ".", ...
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
train
plot_gaps
plot % of gaps at each position
ctbBio/strip_align.py
def plot_gaps(plot, columns): """ plot % of gaps at each position """ from plot_window import window_plot_convolve as plot_window # plot_window([columns], len(columns)*.01, plot) plot_window([[100 - i for i in columns]], len(columns)*.01, plot)
def plot_gaps(plot, columns): """ plot % of gaps at each position """ from plot_window import window_plot_convolve as plot_window # plot_window([columns], len(columns)*.01, plot) plot_window([[100 - i for i in columns]], len(columns)*.01, plot)
[ "plot", "%", "of", "gaps", "at", "each", "position" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_align.py#L11-L17
[ "def", "plot_gaps", "(", "plot", ",", "columns", ")", ":", "from", "plot_window", "import", "window_plot_convolve", "as", "plot_window", "#\tplot_window([columns], len(columns)*.01, plot)", "plot_window", "(", "[", "[", "100", "-", "i", "for", "i", "in", "columns", ...
83b2566b3a5745437ec651cd6cafddd056846240
train
strip_msa_100
strip out columns of an MSA that are gaps in at least X percent (threshold) of sequences
ctbBio/strip_align.py
def strip_msa_100(msa, threshold, plot = False): """ strip out columns of a MSA that represent gaps for X percent (threshold) of sequences """ msa = [seq for seq in parse_fasta(msa)] columns = [[0, 0] for pos in msa[0][1]] # [[#bases, #gaps], [#bases, #gaps], ...] for seq in msa: for position, base in enumerate(seq[1]): if base == '-' or base == '.': columns[position][1] += 1 else: columns[position][0] += 1 columns = [float(float(g)/float(g+b)*100) for b, g in columns] # convert to percent gaps for seq in msa: stripped = [] for position, base in enumerate(seq[1]): if columns[position] < threshold: stripped.append(base) yield [seq[0], ''.join(stripped)] if plot is not False: plot_gaps(plot, columns)
def strip_msa_100(msa, threshold, plot = False): """ strip out columns of a MSA that represent gaps for X percent (threshold) of sequences """ msa = [seq for seq in parse_fasta(msa)] columns = [[0, 0] for pos in msa[0][1]] # [[#bases, #gaps], [#bases, #gaps], ...] for seq in msa: for position, base in enumerate(seq[1]): if base == '-' or base == '.': columns[position][1] += 1 else: columns[position][0] += 1 columns = [float(float(g)/float(g+b)*100) for b, g in columns] # convert to percent gaps for seq in msa: stripped = [] for position, base in enumerate(seq[1]): if columns[position] < threshold: stripped.append(base) yield [seq[0], ''.join(stripped)] if plot is not False: plot_gaps(plot, columns)
[ "strip", "out", "columns", "of", "a", "MSA", "that", "represent", "gaps", "for", "X", "percent", "(", "threshold", ")", "of", "sequences" ]
christophertbrown/bioscripts
python
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_align.py#L19-L39
[ "def", "strip_msa_100", "(", "msa", ",", "threshold", ",", "plot", "=", "False", ")", ":", "msa", "=", "[", "seq", "for", "seq", "in", "parse_fasta", "(", "msa", ")", "]", "columns", "=", "[", "[", "0", ",", "0", "]", "for", "pos", "in", "msa", ...
83b2566b3a5745437ec651cd6cafddd056846240
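A self-contained sketch of the column-stripping rule used by strip_msa_100 above, applied to an in-memory alignment instead of a FASTA file (the real function reads sequences via parse_fasta); the toy alignment and threshold are invented.

msa = [['>a', 'AT-G'], ['>b', 'A--G'], ['>c', 'ATCG']]   # toy alignment
threshold = 50                                           # max % gaps allowed per column
ncols = len(msa[0][1])
gap_pct = [sum(seq[i] in '-.' for _, seq in msa) / len(msa) * 100 for i in range(ncols)]
stripped = [[name, ''.join(b for i, b in enumerate(seq) if gap_pct[i] < threshold)]
            for name, seq in msa]
print(stripped)  # -> [['>a', 'ATG'], ['>b', 'A-G'], ['>c', 'ATG']]; the 67%-gap column is dropped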
train
sample_group
Iterate through all categories in an OrderedDict and return the category name if the SampleID is present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`.
bin/extract_shared_or_unique_otuids.py
def sample_group(sid, groups): """ Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`. """ for name in groups: if sid in groups[name].sids: return name
def sample_group(sid, groups): """ Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`. """ for name in groups: if sid in groups[name].sids: return name
[ "Iterate", "through", "all", "categories", "in", "an", "OrderedDict", "and", "return", "category", "name", "if", "SampleID", "present", "in", "that", "category", "." ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L22-L38
[ "def", "sample_group", "(", "sid", ",", "groups", ")", ":", "for", "name", "in", "groups", ":", "if", "sid", "in", "groups", "[", "name", "]", ".", "sids", ":", "return", "name" ]
0b74ef171e6a84761710548501dfac71285a58a3
train
combine_sets
Combine multiple sets to create a single larger set.
bin/extract_shared_or_unique_otuids.py
def combine_sets(*sets): """ Combine multiple sets to create a single larger set. """ combined = set() for s in sets: combined.update(s) return combined
def combine_sets(*sets): """ Combine multiple sets to create a single larger set. """ combined = set() for s in sets: combined.update(s) return combined
[ "Combine", "multiple", "sets", "to", "create", "a", "single", "larger", "set", "." ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L41-L48
[ "def", "combine_sets", "(", "*", "sets", ")", ":", "combined", "=", "set", "(", ")", "for", "s", "in", "sets", ":", "combined", ".", "update", "(", "s", ")", "return", "combined" ]
0b74ef171e6a84761710548501dfac71285a58a3
train
unique_otuids
Get unique OTUIDs of each category.

:type groups: dict
:param groups: {Category name: OTUIDs in category}

:rtype: dict
:return: Dict keyed on category name with unique OTUIDs as values.
bin/extract_shared_or_unique_otuids.py
def unique_otuids(groups): """ Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. """ uniques = {key: set() for key in groups} for i, group in enumerate(groups): to_combine = groups.values()[:i]+groups.values()[i+1:] combined = combine_sets(*to_combine) uniques[group] = groups[group].difference(combined) return uniques
def unique_otuids(groups): """ Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. """ uniques = {key: set() for key in groups} for i, group in enumerate(groups): to_combine = groups.values()[:i]+groups.values()[i+1:] combined = combine_sets(*to_combine) uniques[group] = groups[group].difference(combined) return uniques
[ "Get", "unique", "OTUIDs", "of", "each", "category", "." ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L51-L66
[ "def", "unique_otuids", "(", "groups", ")", ":", "uniques", "=", "{", "key", ":", "set", "(", ")", "for", "key", "in", "groups", "}", "for", "i", ",", "group", "in", "enumerate", "(", "groups", ")", ":", "to_combine", "=", "groups", ".", "values", ...
0b74ef171e6a84761710548501dfac71285a58a3
train
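The slice groups.values()[:i] above is a Python 2 idiom (dict views are not sliceable in Python 3). A small, self-contained sketch of the same set-difference logic, written against plain {category: set} input rather than the script's result objects:

# Python-3-friendly sketch of the unique-OTUID logic (plain dict of sets assumed).
def unique_otuids_py3(groups):
    uniques = {}
    for name, otuids in groups.items():
        # union of every other group's OTUIDs
        others = set().union(*(v for k, v in groups.items() if k != name))
        uniques[name] = otuids - others
    return uniques

print(unique_otuids_py3({'A': {'otu1', 'otu2'}, 'B': {'otu2', 'otu3'}}))
# -> {'A': {'otu1'}, 'B': {'otu3'}}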
shared_otuids
Get shared OTUIDs between all unique combinations of groups.

:type groups: dict
:param groups: {Category name: OTUIDs in category}

:rtype: dict
:return: Dict keyed on group combination with their shared OTUIDs as values.
bin/extract_shared_or_unique_otuids.py
def shared_otuids(groups): """ Get shared OTUIDs between all unique combinations of groups. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on group combination and their shared OTUIDs as values. """ for g in sorted(groups): print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"]))) number_of_categories = len(groups) shared = defaultdict() for i in range(2, number_of_categories+1): for j in combinations(sorted(groups), i): combo_name = " & ".join(list(j)) for grp in j: # initialize combo values shared[combo_name] = groups[j[0]].results["otuids"].copy() """iterate through all groups and keep updating combo OTUIDs with set intersection_update""" for grp in j[1:]: shared[combo_name].intersection_update(groups[grp].results["otuids"]) return shared
def shared_otuids(groups): """ Get shared OTUIDs between all unique combinations of groups. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on group combination and their shared OTUIDs as values. """ for g in sorted(groups): print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"]))) number_of_categories = len(groups) shared = defaultdict() for i in range(2, number_of_categories+1): for j in combinations(sorted(groups), i): combo_name = " & ".join(list(j)) for grp in j: # initialize combo values shared[combo_name] = groups[j[0]].results["otuids"].copy() """iterate through all groups and keep updating combo OTUIDs with set intersection_update""" for grp in j[1:]: shared[combo_name].intersection_update(groups[grp].results["otuids"]) return shared
[ "Get", "shared", "OTUIDs", "between", "all", "unique", "combinations", "of", "groups", "." ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L69-L93
[ "def", "shared_otuids", "(", "groups", ")", ":", "for", "g", "in", "sorted", "(", "groups", ")", ":", "print", "(", "\"Number of OTUs in {0}: {1}\"", ".", "format", "(", "g", ",", "len", "(", "groups", "[", "g", "]", ".", "results", "[", "\"otuids\"", ...
0b74ef171e6a84761710548501dfac71285a58a3
train
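The same intersection logic as shared_otuids, restated on plain sets for illustration (the real function reads each group's results["otuids"] attribute):

# Simplified sketch: shared members for every combination of two or more groups.
from itertools import combinations

def shared_otuids_simple(groups):
    shared = {}
    for size in range(2, len(groups) + 1):
        for combo in combinations(sorted(groups), size):
            common = set(groups[combo[0]])
            for name in combo[1:]:
                common &= set(groups[name])
            shared[' & '.join(combo)] = common
    return shared

print(shared_otuids_simple({'A': {1, 2, 3}, 'B': {2, 3}, 'C': {3}}))
# -> {'A & B': {2, 3}, 'A & C': {3}, 'B & C': {3}, 'A & B & C': {3}}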
write_uniques
Given a path, the method writes out one file for each group name in the uniques dictionary, with the file name following the pattern PATH/prefix_group.txt. Each file contains the unique OTUIDs found when comparing that group to all the other groups in uniques.

:type path: str
:param path: Output files will be saved in this PATH.

:type prefix: str
:param prefix: Prefix name added in front of the output filename.

:type uniques: dict
:param uniques: Output from the unique_otuids() function.
bin/extract_shared_or_unique_otuids.py
def write_uniques(path, prefix, uniques): """ Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function. """ for group in uniques: fp = osp.join(path, "{}_{}.txt".format(prefix, group)) with open(fp, "w") as outf: outf.write("\n".join(uniques[group]))
def write_uniques(path, prefix, uniques): """ Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function. """ for group in uniques: fp = osp.join(path, "{}_{}.txt".format(prefix, group)) with open(fp, "w") as outf: outf.write("\n".join(uniques[group]))
[ "Given", "a", "path", "the", "method", "writes", "out", "one", "file", "for", "each", "group", "name", "in", "the", "uniques", "dictionary", "with", "the", "file", "name", "in", "the", "pattern" ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L96-L118
[ "def", "write_uniques", "(", "path", ",", "prefix", ",", "uniques", ")", ":", "for", "group", "in", "uniques", ":", "fp", "=", "osp", ".", "join", "(", "path", ",", "\"{}_{}.txt\"", ".", "format", "(", "prefix", ",", "group", ")", ")", "with", "open"...
0b74ef171e6a84761710548501dfac71285a58a3
train
storeFASTA
Parse the records in a FASTA-format file by first reading the entire file into memory.

:type fastaFNH: path to FASTA file or open file handle
:param fastaFNH: The data source from which to parse the FASTA records.
                 Expects the input to resolve to a collection that can be
                 iterated through, such as an open file handle.

:rtype: tuple
:return: FASTA records containing entries for id, description and data.
phylotoast/util.py
def storeFASTA(fastaFNH): """ Parse the records in a FASTA-format file by first reading the entire file into memory. :type source: path to FAST file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ fasta = file_handle(fastaFNH).read() return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:])) for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])]
def storeFASTA(fastaFNH): """ Parse the records in a FASTA-format file by first reading the entire file into memory. :type source: path to FAST file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ fasta = file_handle(fastaFNH).read() return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:])) for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])]
[ "Parse", "the", "records", "in", "a", "FASTA", "-", "format", "file", "by", "first", "reading", "the", "entire", "file", "into", "memory", "." ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L20-L34
[ "def", "storeFASTA", "(", "fastaFNH", ")", ":", "fasta", "=", "file_handle", "(", "fastaFNH", ")", ".", "read", "(", ")", "return", "[", "FASTARecord", "(", "rec", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ",", "rec", "[", "0", "]", ...
0b74ef171e6a84761710548501dfac71285a58a3
train
parseFASTA
Parse the records in a FASTA-format file, keeping the file open and reading through one line at a time.

:type fastaFNH: path to FASTA file or open file handle
:param fastaFNH: The data source from which to parse the FASTA records.
                 Expects the input to resolve to a collection that can be
                 iterated through, such as an open file handle.

:rtype: tuple
:return: FASTA records containing entries for id, description and data.
phylotoast/util.py
def parseFASTA(fastaFNH): """ Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type source: path to FAST file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ recs = [] seq = [] seqID = "" descr = "" for line in file_handle(fastaFNH): line = line.strip() if line[0] == ";": continue if line[0] == ">": # conclude previous record if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) seq = [] # start new record line = line[1:].split(None, 1) seqID, descr = line[0], line[1] else: seq.append(line) # catch last seq in file if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) return recs
def parseFASTA(fastaFNH): """ Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type source: path to FAST file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ recs = [] seq = [] seqID = "" descr = "" for line in file_handle(fastaFNH): line = line.strip() if line[0] == ";": continue if line[0] == ">": # conclude previous record if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) seq = [] # start new record line = line[1:].split(None, 1) seqID, descr = line[0], line[1] else: seq.append(line) # catch last seq in file if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) return recs
[ "Parse", "the", "records", "in", "a", "FASTA", "-", "format", "file", "keeping", "the", "file", "open", "and", "reading", "through", "one", "line", "at", "a", "time", "." ]
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L37-L73
[ "def", "parseFASTA", "(", "fastaFNH", ")", ":", "recs", "=", "[", "]", "seq", "=", "[", "]", "seqID", "=", "\"\"", "descr", "=", "\"\"", "for", "line", "in", "file_handle", "(", "fastaFNH", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "...
0b74ef171e6a84761710548501dfac71285a58a3
train
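A usage sketch for parseFASTA (the file name is hypothetical; FASTARecord is assumed to be a three-field namedtuple, so tuple unpacking works). Note that a blank line in the input would raise an IndexError, since line[0] is tested before checking for empty lines.

# Hypothetical usage of parseFASTA on a FASTA file with no blank lines.
from phylotoast.util import parseFASTA

for seq_id, descr, seq in parseFASTA('seqs.fasta'):
    print(seq_id, len(seq))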
parse_map_file
Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (default) or a user-supplied one. The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field).

:type mapFNH: str
:param mapFNH: Either the full path to the map file or an open file handle

:rtype: tuple, dict
:return: A tuple of the header line for the mapping file and a map associating
         each line of the mapping file with the appropriate sample ID (each
         value of the map also contains the sample ID). An OrderedDict is used
         for mapping so the returned map is guaranteed to have the same order
         as the input file.

Example data:
#SampleID  BarcodeSequence  LinkerPrimerSequence  State    Description
11.V13     ACGCTCGACA       GTTTGATCCTGGCTCAG     Disease  Rat_Oral
phylotoast/util.py
def parse_map_file(mapFNH): """ Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (default) or a user-supplied one. The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple, dict :return: A tuple of header line for mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral """ m = OrderedDict() map_header = None with file_handle(mapFNH) as mapF: for line in mapF: if line.startswith("#SampleID"): map_header = line.strip().split("\t") if line.startswith("#") or not line: continue line = line.strip().split("\t") m[line[0]] = line return map_header, m
def parse_map_file(mapFNH): """ Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (default) or a user-supplied one. The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple, dict :return: A tuple of header line for mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral """ m = OrderedDict() map_header = None with file_handle(mapFNH) as mapF: for line in mapF: if line.startswith("#SampleID"): map_header = line.strip().split("\t") if line.startswith("#") or not line: continue line = line.strip().split("\t") m[line[0]] = line return map_header, m
[ "Opens", "a", "QIIME", "mapping", "file", "and", "stores", "the", "contents", "in", "a", "dictionary", "keyed", "on", "SampleID", "(", "default", ")", "or", "a", "user", "-", "supplied", "one", ".", "The", "only", "required", "fields", "are", "SampleID", ...
smdabdoub/phylotoast
python
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L76-L108
[ "def", "parse_map_file", "(", "mapFNH", ")", ":", "m", "=", "OrderedDict", "(", ")", "map_header", "=", "None", "with", "file_handle", "(", "mapFNH", ")", "as", "mapF", ":", "for", "line", "in", "mapF", ":", "if", "line", ".", "startswith", "(", "\"#Sa...
0b74ef171e6a84761710548501dfac71285a58a3
valid
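A usage sketch for parse_map_file, using the SampleID from the example data in the docstring (the mapping-file path is hypothetical):

# Hypothetical usage: read a QIIME mapping file and look up one sample's row.
from phylotoast.util import parse_map_file

header, sample_map = parse_map_file('mapping.txt')
print(header)                # ['#SampleID', 'BarcodeSequence', ...]
print(sample_map['11.V13'])  # the full tab-split row for that SampleID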
learn
Train a deepq model.

Parameters
-------
env: gym.Env
    environment to train on
network: string or a function
    neural network to use as a q function approximator. If string, has to be one of the names
    of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
    should take an observation tensor and return a latent variable tensor, which will be
    mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
    prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
    learning rate for adam optimizer
total_timesteps: int
    number of env steps to optimize for
buffer_size: int
    size of the replay buffer
exploration_fraction: float
    fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
    final value of random action probability
train_freq: int
    update the model every `train_freq` steps.
batch_size: int
    size of a batch sampled from the replay buffer for training
print_freq: int
    how often to print out training progress
    set to None to disable printing
checkpoint_freq: int
    how often to save the model. This is so that the best version is restored at the end of the
    training. If you do not wish to restore the best version at the end of the training, set this
    variable to None.
learning_starts: int
    how many steps of the model to collect transitions for before learning starts
gamma: float
    discount factor
target_network_update_freq: int
    update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
    if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
    alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
    initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
    number of iterations over which beta will be annealed from initial value
    to 1.0. If set to None, equals to total_timesteps.
prioritized_replay_eps: float
    epsilon to add to the TD errors when updating priorities.
param_noise: bool
    whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
    function called at every step with state of the algorithm.
    If callback returns true training stops.
load_path: str
    path to load the model from. (default: None)
**network_kwargs
    additional keyword arguments to pass to the network builder.

Returns
-------
act: ActWrapper
    Wrapper over act function. Adds ability to save it and load it.
    See header of baselines/deepq/categorical.py for details on the act function.
baselines/deepq/deepq.py
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) load_variables(model_file) return act
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) load_variables(model_file) return act
[ "Train", "a", "deepq", "model", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L95-L333
[ "def", "learn", "(", "env", ",", "network", ",", "seed", "=", "None", ",", "lr", "=", "5e-4", ",", "total_timesteps", "=", "100000", ",", "buffer_size", "=", "50000", ",", "exploration_fraction", "=", "0.1", ",", "exploration_final_eps", "=", "0.02", ",", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
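A minimal driving sketch for learn(), following the CartPole pattern used elsewhere in the baselines repository (the hyperparameters here are illustrative):

# Sketch: train a small DQN on CartPole and save the resulting policy.
import gym
from baselines import deepq

env = gym.make('CartPole-v0')
act = deepq.learn(env, network='mlp', lr=1e-3, total_timesteps=100000,
                  exploration_fraction=0.1, exploration_final_eps=0.02,
                  print_freq=10)
act.save_act('cartpole_model.pkl')  # ActWrapper.save_act, see the next record
env.close()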
ActWrapper.save_act
Save model to a pickle located at `path`
baselines/deepq/deepq.py
def save_act(self, path=None): """Save model to a pickle located at `path`""" if path is None: path = os.path.join(logger.get_dir(), "model.pkl") with tempfile.TemporaryDirectory() as td: save_variables(os.path.join(td, "model")) arc_name = os.path.join(td, "packed.zip") with zipfile.ZipFile(arc_name, 'w') as zipf: for root, dirs, files in os.walk(td): for fname in files: file_path = os.path.join(root, fname) if file_path != arc_name: zipf.write(file_path, os.path.relpath(file_path, td)) with open(arc_name, "rb") as f: model_data = f.read() with open(path, "wb") as f: cloudpickle.dump((model_data, self._act_params), f)
def save_act(self, path=None): """Save model to a pickle located at `path`""" if path is None: path = os.path.join(logger.get_dir(), "model.pkl") with tempfile.TemporaryDirectory() as td: save_variables(os.path.join(td, "model")) arc_name = os.path.join(td, "packed.zip") with zipfile.ZipFile(arc_name, 'w') as zipf: for root, dirs, files in os.walk(td): for fname in files: file_path = os.path.join(root, fname) if file_path != arc_name: zipf.write(file_path, os.path.relpath(file_path, td)) with open(arc_name, "rb") as f: model_data = f.read() with open(path, "wb") as f: cloudpickle.dump((model_data, self._act_params), f)
[ "Save", "model", "to", "a", "pickle", "located", "at", "path" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L55-L72
[ "def", "save_act", "(", "self", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "os", ".", "path", ".", "join", "(", "logger", ".", "get_dir", "(", ")", ",", "\"model.pkl\"", ")", "with", "tempfile", ".", "Temporar...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
nature_cnn
CNN from Nature paper.
baselines/common/models.py
def nature_cnn(unscaled_images, **conv_kwargs): """ CNN from Nature paper. """ scaled_images = tf.cast(unscaled_images, tf.float32) / 255. activ = tf.nn.relu h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs)) h3 = conv_to_fc(h3) return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
def nature_cnn(unscaled_images, **conv_kwargs): """ CNN from Nature paper. """ scaled_images = tf.cast(unscaled_images, tf.float32) / 255. activ = tf.nn.relu h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs)) h3 = conv_to_fc(h3) return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
[ "CNN", "from", "Nature", "paper", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L16-L27
[ "def", "nature_cnn", "(", "unscaled_images", ",", "*", "*", "conv_kwargs", ")", ":", "scaled_images", "=", "tf", ".", "cast", "(", "unscaled_images", ",", "tf", ".", "float32", ")", "/", "255.", "activ", "=", "tf", ".", "nn", ".", "relu", "h", "=", "...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
mlp
Stack of fully-connected layers to be used in a policy / q-function approximator

Parameters:
----------
num_layers: int
    number of fully-connected layers (default: 2)
num_hidden: int
    size of fully-connected layers (default: 64)
activation:
    activation function (default: tf.tanh)

Returns:
-------
function that builds fully connected network with a given input tensor / placeholder
baselines/common/models.py
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False): """ Stack of fully-connected layers to be used in a policy / q-function approximator Parameters: ---------- num_layers: int number of fully-connected layers (default: 2) num_hidden: int size of fully-connected layers (default: 64) activation: activation function (default: tf.tanh) Returns: ------- function that builds fully connected network with a given input tensor / placeholder """ def network_fn(X): h = tf.layers.flatten(X) for i in range(num_layers): h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2)) if layer_norm: h = tf.contrib.layers.layer_norm(h, center=True, scale=True) h = activation(h) return h return network_fn
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False): """ Stack of fully-connected layers to be used in a policy / q-function approximator Parameters: ---------- num_layers: int number of fully-connected layers (default: 2) num_hidden: int size of fully-connected layers (default: 64) activation: activation function (default: tf.tanh) Returns: ------- function that builds fully connected network with a given input tensor / placeholder """ def network_fn(X): h = tf.layers.flatten(X) for i in range(num_layers): h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2)) if layer_norm: h = tf.contrib.layers.layer_norm(h, center=True, scale=True) h = activation(h) return h return network_fn
[ "Stack", "of", "fully", "-", "connected", "layers", "to", "be", "used", "in", "a", "policy", "/", "q", "-", "function", "approximator" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L31-L59
[ "def", "mlp", "(", "num_layers", "=", "2", ",", "num_hidden", "=", "64", ",", "activation", "=", "tf", ".", "tanh", ",", "layer_norm", "=", "False", ")", ":", "def", "network_fn", "(", "X", ")", ":", "h", "=", "tf", ".", "layers", ".", "flatten", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
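A short sketch of using the mlp builder directly (the returned network_fn is what the policy/q-function constructors consume):

# Sketch: build a 3-layer, 128-unit fully-connected network builder.
import tensorflow as tf
from baselines.common.models import mlp

network_fn = mlp(num_layers=3, num_hidden=128, activation=tf.nn.relu)
# network_fn(X) returns the latent tensor for an observation placeholder X.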
lstm
Builds LSTM (Long-Short Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.

Specifically,
    S is a placeholder to feed current state (LSTM state has to be managed outside policy)
    M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
    initial_state is a numpy array containing initial lstm state (usually zeros)
    state is the output LSTM state (to be fed into S at the next call)

An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example

Parameters:
----------
nlstm: int
    LSTM hidden state size
layer_norm: bool
    if True, layer-normalized version of LSTM is used

Returns:
-------
function that builds LSTM with a given input tensor / placeholder
baselines/common/models.py
def lstm(nlstm=128, layer_norm=False): """ Builds LSTM (Long-Short Term Memory) network to be used in a policy. Note that the resulting function returns not only the output of the LSTM (i.e. hidden state of lstm for each step in the sequence), but also a dictionary with auxiliary tensors to be set as policy attributes. Specifically, S is a placeholder to feed current state (LSTM state has to be managed outside policy) M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too) initial_state is a numpy array containing initial lstm state (usually zeros) state is the output LSTM state (to be fed into S at the next call) An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example Parameters: ---------- nlstm: int LSTM hidden state size layer_norm: bool if True, layer-normalized version of LSTM is used Returns: ------- function that builds LSTM with a given input tensor / placeholder """ def network_fn(X, nenv=1): nbatch = X.shape[0] nsteps = nbatch // nenv h = tf.layers.flatten(X) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) if layer_norm: h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm) else: h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm) h = seq_to_batch(h5) initial_state = np.zeros(S.shape.as_list(), dtype=float) return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} return network_fn
def lstm(nlstm=128, layer_norm=False): """ Builds LSTM (Long-Short Term Memory) network to be used in a policy. Note that the resulting function returns not only the output of the LSTM (i.e. hidden state of lstm for each step in the sequence), but also a dictionary with auxiliary tensors to be set as policy attributes. Specifically, S is a placeholder to feed current state (LSTM state has to be managed outside policy) M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too) initial_state is a numpy array containing initial lstm state (usually zeros) state is the output LSTM state (to be fed into S at the next call) An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example Parameters: ---------- nlstm: int LSTM hidden state size layer_norm: bool if True, layer-normalized version of LSTM is used Returns: ------- function that builds LSTM with a given input tensor / placeholder """ def network_fn(X, nenv=1): nbatch = X.shape[0] nsteps = nbatch // nenv h = tf.layers.flatten(X) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) if layer_norm: h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm) else: h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm) h = seq_to_batch(h5) initial_state = np.zeros(S.shape.as_list(), dtype=float) return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} return network_fn
[ "Builds", "LSTM", "(", "Long", "-", "Short", "Term", "Memory", ")", "network", "to", "be", "used", "in", "a", "policy", ".", "Note", "that", "the", "resulting", "function", "returns", "not", "only", "the", "output", "of", "the", "LSTM", "(", "i", ".", ...
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L84-L135
[ "def", "lstm", "(", "nlstm", "=", "128", ",", "layer_norm", "=", "False", ")", ":", "def", "network_fn", "(", "X", ",", "nenv", "=", "1", ")", ":", "nbatch", "=", "X", ".", "shape", "[", "0", "]", "nsteps", "=", "nbatch", "//", "nenv", "h", "="...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
conv_only
convolutions-only net

Parameters:
----------
convs: list of triples (filter_number, filter_size, stride)
    specifying parameters for each layer.

Returns:
-------
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
baselines/common/models.py
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs): ''' convolutions-only net Parameters: ---------- conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer. Returns: function that takes tensorflow tensor as input and returns the output of the last convolutional layer ''' def network_fn(X): out = tf.cast(X, tf.float32) / 255. with tf.variable_scope("convnet"): for num_outputs, kernel_size, stride in convs: out = layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, activation_fn=tf.nn.relu, **conv_kwargs) return out return network_fn
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs): ''' convolutions-only net Parameters: ---------- conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer. Returns: function that takes tensorflow tensor as input and returns the output of the last convolutional layer ''' def network_fn(X): out = tf.cast(X, tf.float32) / 255. with tf.variable_scope("convnet"): for num_outputs, kernel_size, stride in convs: out = layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, activation_fn=tf.nn.relu, **conv_kwargs) return out return network_fn
[ "convolutions", "-", "only", "net" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L171-L198
[ "def", "conv_only", "(", "convs", "=", "[", "(", "32", ",", "8", ",", "4", ")", ",", "(", "64", ",", "4", ",", "2", ")", ",", "(", "64", ",", "3", ",", "1", ")", "]", ",", "*", "*", "conv_kwargs", ")", ":", "def", "network_fn", "(", "X", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
get_network_builder
If you want to register your own network outside models.py, you just need:

Usage Example:
-------------
from baselines.common.models import register

@register("your_network_name")
def your_network_define(**net_kwargs):
    ...
    return network_fn
baselines/common/models.py
def get_network_builder(name): """ If you want to register your own network outside models.py, you just need: Usage Example: ------------- from baselines.common.models import register @register("your_network_name") def your_network_define(**net_kwargs): ... return network_fn """ if callable(name): return name elif name in mapping: return mapping[name] else: raise ValueError('Unknown network type: {}'.format(name))
def get_network_builder(name): """ If you want to register your own network outside models.py, you just need: Usage Example: ------------- from baselines.common.models import register @register("your_network_name") def your_network_define(**net_kwargs): ... return network_fn """ if callable(name): return name elif name in mapping: return mapping[name] else: raise ValueError('Unknown network type: {}'.format(name))
[ "If", "you", "want", "to", "register", "your", "own", "network", "outside", "models", ".", "py", "you", "just", "need", ":" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L206-L224
[ "def", "get_network_builder", "(", "name", ")", ":", "if", "callable", "(", "name", ")", ":", "return", "name", "elif", "name", "in", "mapping", ":", "return", "mapping", "[", "name", "]", "else", ":", "raise", "ValueError", "(", "'Unknown network type: {}'"...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
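A sketch of the registration pattern described in the docstring above (the network name and body are hypothetical):

# Register a tiny custom network so it can be referred to by name.
import tensorflow as tf
from baselines.common.models import register

@register('tiny_mlp')
def tiny_mlp(**net_kwargs):
    def network_fn(X):
        h = tf.layers.flatten(X)
        return tf.layers.dense(h, 32, activation=tf.tanh)
    return network_fn

# get_network_builder('tiny_mlp') now resolves to this builder.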
mlp
This model takes as input an observation and returns values of all actions.

Parameters
----------
hiddens: [int]
    list of sizes of hidden layers
layer_norm: bool
    if true applies layer normalization for every layer
    as described in https://arxiv.org/abs/1607.06450

Returns
-------
q_func: function
    q_function for DQN algorithm.
baselines/deepq/models.py
def mlp(hiddens=[], layer_norm=False): """This model takes as input an observation and returns values of all actions. Parameters ---------- hiddens: [int] list of sizes of hidden layers layer_norm: bool if true applies layer normalization for every layer as described in https://arxiv.org/abs/1607.06450 Returns ------- q_func: function q_function for DQN algorithm. """ return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs)
def mlp(hiddens=[], layer_norm=False): """This model takes as input an observation and returns values of all actions. Parameters ---------- hiddens: [int] list of sizes of hidden layers layer_norm: bool if true applies layer normalization for every layer as described in https://arxiv.org/abs/1607.06450 Returns ------- q_func: function q_function for DQN algorithm. """ return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs)
[ "This", "model", "takes", "as", "input", "an", "observation", "and", "returns", "values", "of", "all", "actions", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/models.py#L17-L33
[ "def", "mlp", "(", "hiddens", "=", "[", "]", ",", "layer_norm", "=", "False", ")", ":", "return", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "_mlp", "(", "hiddens", ",", "layer_norm", "=", "layer_norm", ",", "*", "args", ",", "*", "*", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
cnn_to_mlp
This model takes as input an observation and returns values of all actions.

Parameters
----------
convs: [(int, int, int)]
    list of convolutional layers in form of
    (num_outputs, kernel_size, stride)
hiddens: [int]
    list of sizes of hidden layers
dueling: bool
    if true double the output MLP to compute a baseline
    for action scores
layer_norm: bool
    if true applies layer normalization for every layer
    as described in https://arxiv.org/abs/1607.06450

Returns
-------
q_func: function
    q_function for DQN algorithm.
baselines/deepq/models.py
def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False): """This model takes as input an observation and returns values of all actions. Parameters ---------- convs: [(int, int, int)] list of convolutional layers in form of (num_outputs, kernel_size, stride) hiddens: [int] list of sizes of hidden layers dueling: bool if true double the output MLP to compute a baseline for action scores layer_norm: bool if true applies layer normalization for every layer as described in https://arxiv.org/abs/1607.06450 Returns ------- q_func: function q_function for DQN algorithm. """ return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs)
def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False): """This model takes as input an observation and returns values of all actions. Parameters ---------- convs: [(int, int, int)] list of convolutional layers in form of (num_outputs, kernel_size, stride) hiddens: [int] list of sizes of hidden layers dueling: bool if true double the output MLP to compute a baseline for action scores layer_norm: bool if true applies layer normalization for every layer as described in https://arxiv.org/abs/1607.06450 Returns ------- q_func: function q_function for DQN algorithm. """ return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs)
[ "This", "model", "takes", "as", "input", "an", "observation", "and", "returns", "values", "of", "all", "actions", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/models.py#L73-L96
[ "def", "cnn_to_mlp", "(", "convs", ",", "hiddens", ",", "dueling", "=", "False", ",", "layer_norm", "=", "False", ")", ":", "return", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "_cnn_to_mlp", "(", "convs", ",", "hiddens", ",", "dueling", ",",...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
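A sketch of building the classic dueling Atari q-function with cnn_to_mlp (the layer sizes mirror the conv_only defaults shown earlier):

# Dueling CNN-to-MLP q-function for image observations.
from baselines.deepq.models import cnn_to_mlp

q_func = cnn_to_mlp(
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],  # (num_outputs, kernel_size, stride)
    hiddens=[256],
    dueling=True,
)
# q_func can then be passed as the q_func argument of deepq.build_train.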
make_vec_env
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
baselines/common/cmd_util.py
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0, flatten_dict_observations=True, gamestate=None): """ Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo. """ wrapper_kwargs = wrapper_kwargs or {} mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0 seed = seed + 10000 * mpi_rank if seed is not None else None logger_dir = logger.get_dir() def make_thunk(rank): return lambda: make_env( env_id=env_id, env_type=env_type, mpi_rank=mpi_rank, subrank=rank, seed=seed, reward_scale=reward_scale, gamestate=gamestate, flatten_dict_observations=flatten_dict_observations, wrapper_kwargs=wrapper_kwargs, logger_dir=logger_dir ) set_global_seeds(seed) if num_env > 1: return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)]) else: return DummyVecEnv([make_thunk(start_index)])
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0, flatten_dict_observations=True, gamestate=None): """ Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo. """ wrapper_kwargs = wrapper_kwargs or {} mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0 seed = seed + 10000 * mpi_rank if seed is not None else None logger_dir = logger.get_dir() def make_thunk(rank): return lambda: make_env( env_id=env_id, env_type=env_type, mpi_rank=mpi_rank, subrank=rank, seed=seed, reward_scale=reward_scale, gamestate=gamestate, flatten_dict_observations=flatten_dict_observations, wrapper_kwargs=wrapper_kwargs, logger_dir=logger_dir ) set_global_seeds(seed) if num_env > 1: return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)]) else: return DummyVecEnv([make_thunk(start_index)])
[ "Create", "a", "wrapped", "monitored", "SubprocVecEnv", "for", "Atari", "and", "MuJoCo", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L21-L52
[ "def", "make_vec_env", "(", "env_id", ",", "env_type", ",", "num_env", ",", "seed", ",", "wrapper_kwargs", "=", "None", ",", "start_index", "=", "0", ",", "reward_scale", "=", "1.0", ",", "flatten_dict_observations", "=", "True", ",", "gamestate", "=", "None...
3301089b48c42b87b396e246ea3f56fa4bfc9678
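A hedged usage sketch for make_vec_env; the environment id, the 'atari' env_type string and num_env are illustrative values, not taken from this record.

from baselines.common.cmd_util import make_vec_env

# num_env > 1, so the copies run in subprocesses via SubprocVecEnv.
venv = make_vec_env('PongNoFrameskip-v4', 'atari', num_env=4, seed=0)

obs = venv.reset()      # batched observations, one row per environment copy
venv.close()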
valid
make_mujoco_env
Create a wrapped, monitored gym.Env for MuJoCo.
baselines/common/cmd_util.py
def make_mujoco_env(env_id, seed, reward_scale=1.0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ rank = MPI.COMM_WORLD.Get_rank() myseed = seed + 1000 * rank if seed is not None else None set_global_seeds(myseed) env = gym.make(env_id) logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank)) env = Monitor(env, logger_path, allow_early_resets=True) env.seed(seed) if reward_scale != 1.0: from baselines.common.retro_wrappers import RewardScaler env = RewardScaler(env, reward_scale) return env
def make_mujoco_env(env_id, seed, reward_scale=1.0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ rank = MPI.COMM_WORLD.Get_rank() myseed = seed + 1000 * rank if seed is not None else None set_global_seeds(myseed) env = gym.make(env_id) logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank)) env = Monitor(env, logger_path, allow_early_resets=True) env.seed(seed) if reward_scale != 1.0: from baselines.common.retro_wrappers import RewardScaler env = RewardScaler(env, reward_scale) return env
[ "Create", "a", "wrapped", "monitored", "gym", ".", "Env", "for", "MuJoCo", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L88-L102
[ "def", "make_mujoco_env", "(", "env_id", ",", "seed", ",", "reward_scale", "=", "1.0", ")", ":", "rank", "=", "MPI", ".", "COMM_WORLD", ".", "Get_rank", "(", ")", "myseed", "=", "seed", "+", "1000", "*", "rank", "if", "seed", "is", "not", "None", "el...
3301089b48c42b87b396e246ea3f56fa4bfc9678
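A short sketch of the single-environment helper; the environment id is a placeholder and the example assumes MuJoCo is installed.

from baselines.common.cmd_util import make_mujoco_env

env = make_mujoco_env('HalfCheetah-v2', seed=0)   # Monitor-wrapped gym.Env
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
env.close()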
valid
make_robotics_env
Create a wrapped, monitored gym.Env for MuJoCo.
baselines/common/cmd_util.py
def make_robotics_env(env_id, seed, rank=0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ set_global_seeds(seed) env = gym.make(env_id) env = FlattenDictWrapper(env, ['observation', 'desired_goal']) env = Monitor( env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)), info_keywords=('is_success',)) env.seed(seed) return env
def make_robotics_env(env_id, seed, rank=0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ set_global_seeds(seed) env = gym.make(env_id) env = FlattenDictWrapper(env, ['observation', 'desired_goal']) env = Monitor( env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)), info_keywords=('is_success',)) env.seed(seed) return env
[ "Create", "a", "wrapped", "monitored", "gym", ".", "Env", "for", "MuJoCo", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L104-L115
[ "def", "make_robotics_env", "(", "env_id", ",", "seed", ",", "rank", "=", "0", ")", ":", "set_global_seeds", "(", "seed", ")", "env", "=", "gym", ".", "make", "(", "env_id", ")", "env", "=", "FlattenDictWrapper", "(", "env", ",", "[", "'observation'", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
common_arg_parser
Create an argparse.ArgumentParser for run_mujoco.py.
baselines/common/cmd_util.py
def common_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2') parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str) parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2') parser.add_argument('--num_timesteps', type=float, default=1e6), parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None) parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None) parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int) parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float) parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str) parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int) parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int) parser.add_argument('--play', default=False, action='store_true') return parser
def common_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2') parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str) parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2') parser.add_argument('--num_timesteps', type=float, default=1e6), parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None) parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None) parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int) parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float) parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str) parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int) parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int) parser.add_argument('--play', default=False, action='store_true') return parser
[ "Create", "an", "argparse", ".", "ArgumentParser", "for", "run_mujoco", ".", "py", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L135-L153
[ "def", "common_arg_parser", "(", ")", ":", "parser", "=", "arg_parser", "(", ")", "parser", ".", "add_argument", "(", "'--env'", ",", "help", "=", "'environment ID'", ",", "type", "=", "str", ",", "default", "=", "'Reacher-v2'", ")", "parser", ".", "add_ar...
3301089b48c42b87b396e246ea3f56fa4bfc9678
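A sketch of how this parser is typically combined with parse_unknown_args (defined later in this file): known flags are consumed by argparse, anything else becomes extra keyword arguments for the algorithm. The command-line values are made up.

from baselines.common.cmd_util import common_arg_parser, parse_unknown_args

parser = common_arg_parser()
args, unknown = parser.parse_known_args(
    ['--env=PongNoFrameskip-v4', '--alg=ppo2', '--num_timesteps=1e6', '--nsteps=256'])
extra_kwargs = parse_unknown_args(unknown)   # {'nsteps': '256'} (values stay strings)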
valid
robotics_arg_parser
Create an argparse.ArgumentParser for run_mujoco.py.
baselines/common/cmd_util.py
def robotics_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--num-timesteps', type=int, default=int(1e6)) return parser
def robotics_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--num-timesteps', type=int, default=int(1e6)) return parser
[ "Create", "an", "argparse", ".", "ArgumentParser", "for", "run_mujoco", ".", "py", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L155-L163
[ "def", "robotics_arg_parser", "(", ")", ":", "parser", "=", "arg_parser", "(", ")", "parser", ".", "add_argument", "(", "'--env'", ",", "help", "=", "'environment ID'", ",", "type", "=", "str", ",", "default", "=", "'FetchReach-v0'", ")", "parser", ".", "a...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
parse_unknown_args
Parse arguments not consumed by the arg parser into a dictionary
baselines/common/cmd_util.py
def parse_unknown_args(args): """ Parse arguments not consumed by arg parser into a dicitonary """ retval = {} preceded_by_key = False for arg in args: if arg.startswith('--'): if '=' in arg: key = arg.split('=')[0][2:] value = arg.split('=')[1] retval[key] = value else: key = arg[2:] preceded_by_key = True elif preceded_by_key: retval[key] = arg preceded_by_key = False return retval
def parse_unknown_args(args): """ Parse arguments not consumed by arg parser into a dicitonary """ retval = {} preceded_by_key = False for arg in args: if arg.startswith('--'): if '=' in arg: key = arg.split('=')[0][2:] value = arg.split('=')[1] retval[key] = value else: key = arg[2:] preceded_by_key = True elif preceded_by_key: retval[key] = arg preceded_by_key = False return retval
[ "Parse", "arguments", "not", "consumed", "by", "arg", "parser", "into", "a", "dicitonary" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L166-L185
[ "def", "parse_unknown_args", "(", "args", ")", ":", "retval", "=", "{", "}", "preceded_by_key", "=", "False", "for", "arg", "in", "args", ":", "if", "arg", ".", "startswith", "(", "'--'", ")", ":", "if", "'='", "in", "arg", ":", "key", "=", "arg", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
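A worked example covering both accepted flag forms; note that values are returned as plain strings, so callers are expected to cast them.

from baselines.common.cmd_util import parse_unknown_args

extra = parse_unknown_args(['--lr=0.001', '--num_layers', '3'])
# '--lr=0.001'              -> split on '='
# '--num_layers' then '3'   -> value taken from the following token
assert extra == {'lr': '0.001', 'num_layers': '3'}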
valid
clear_mpi_env_vars
from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing Processes.
baselines/common/vec_env/vec_env.py
def clear_mpi_env_vars(): """ from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing Processes. """ removed_environment = {} for k, v in list(os.environ.items()): for prefix in ['OMPI_', 'PMI_']: if k.startswith(prefix): removed_environment[k] = v del os.environ[k] try: yield finally: os.environ.update(removed_environment)
def clear_mpi_env_vars(): """ from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing Processes. """ removed_environment = {} for k, v in list(os.environ.items()): for prefix in ['OMPI_', 'PMI_']: if k.startswith(prefix): removed_environment[k] = v del os.environ[k] try: yield finally: os.environ.update(removed_environment)
[ "from", "mpi4py", "import", "MPI", "will", "call", "MPI_Init", "by", "default", ".", "If", "the", "child", "process", "has", "MPI", "environment", "variables", "MPI", "will", "think", "that", "the", "child", "process", "is", "an", "MPI", "process", "just", ...
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/vec_env.py#L204-L219
[ "def", "clear_mpi_env_vars", "(", ")", ":", "removed_environment", "=", "{", "}", "for", "k", ",", "v", "in", "list", "(", "os", ".", "environ", ".", "items", "(", ")", ")", ":", "for", "prefix", "in", "[", "'OMPI_'", ",", "'PMI_'", "]", ":", "if",...
3301089b48c42b87b396e246ea3f56fa4bfc9678
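A sketch of the intended use, assuming the function is exposed as a context manager (the decorator is not shown in this record) and with a placeholder worker body: wrap child-process creation so mpi4py imported in the child does not see the parent's OMPI_/PMI_ variables.

import multiprocessing
from baselines.common.vec_env.vec_env import clear_mpi_env_vars

def worker():
    pass   # may import mpi4py here without inheriting OMPI_*/PMI_* variables

with clear_mpi_env_vars():                 # OMPI_*/PMI_* temporarily removed from os.environ
    p = multiprocessing.Process(target=worker)
    p.start()                              # child is created with the cleaned environment
p.join()                                   # parent's environment is restored after the block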
valid
learn
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
baselines/ppo2/ppo2.py
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None, model_fn=None, **network_kwargs): ''' Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) total_timesteps = int(total_timesteps) policy = build_policy(env, network, **network_kwargs) # Get the nb of env nenvs = env.num_envs # Get state_space and action_space ob_space = env.observation_space ac_space = env.action_space # Calculate the batch_size nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches # Instantiate the model object (that creates act_model and train_model) if model_fn is None: from baselines.ppo2.model import Model model_fn = Model model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) if eval_env is not None: eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam) epinfobuf = deque(maxlen=100) if eval_env is not None: eval_epinfobuf = deque(maxlen=100) # Start total timer tfirststart = time.perf_counter() nupdates = total_timesteps//nbatch for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 # Start timer tstart = time.perf_counter() frac = 1.0 - (update - 1.0) / nupdates # Calculate the learning rate lrnow = lr(frac) # Calculate the cliprange cliprangenow = cliprange(frac) # Get minibatch obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632 if eval_env is not None: eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632 epinfobuf.extend(epinfos) if eval_env is not None: eval_epinfobuf.extend(eval_epinfos) # Here what we're going to do is for each minibatch calculate the loss and append it. 
mblossvals = [] if states is None: # nonrecurrent version # Index of each element of batch_size # Create the indices array inds = np.arange(nbatch) for _ in range(noptepochs): # Randomize the indexes np.random.shuffle(inds) # 0 to batch_size with batch_train_size step for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mblossvals.append(model.train(lrnow, cliprangenow, *slices)) else: # recurrent version assert nenvs % nminibatches == 0 envsperbatch = nenvs // nminibatches envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # Feedforward --> get losses --> update lossvals = np.mean(mblossvals, axis=0) # End timer tnow = time.perf_counter() # Calculate the fps (frame per second) fps = int(nbatch / (tnow - tstart)) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, returns) logger.logkv("serial_timesteps", update*nsteps) logger.logkv("nupdates", update) logger.logkv("total_timesteps", update*nbatch) logger.logkv("fps", fps) logger.logkv("explained_variance", float(ev)) logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) if eval_env is not None: logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) ) logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) ) logger.logkv('time_elapsed', tnow - tfirststart) for (lossval, lossname) in zip(lossvals, model.loss_names): logger.logkv(lossname, lossval) if MPI is None or MPI.COMM_WORLD.Get_rank() == 0: logger.dumpkvs() if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0): checkdir = osp.join(logger.get_dir(), 'checkpoints') os.makedirs(checkdir, exist_ok=True) savepath = osp.join(checkdir, '%.5i'%update) print('Saving to', savepath) model.save(savepath) return model
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None, model_fn=None, **network_kwargs): ''' Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) total_timesteps = int(total_timesteps) policy = build_policy(env, network, **network_kwargs) # Get the nb of env nenvs = env.num_envs # Get state_space and action_space ob_space = env.observation_space ac_space = env.action_space # Calculate the batch_size nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches # Instantiate the model object (that creates act_model and train_model) if model_fn is None: from baselines.ppo2.model import Model model_fn = Model model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) if eval_env is not None: eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam) epinfobuf = deque(maxlen=100) if eval_env is not None: eval_epinfobuf = deque(maxlen=100) # Start total timer tfirststart = time.perf_counter() nupdates = total_timesteps//nbatch for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 # Start timer tstart = time.perf_counter() frac = 1.0 - (update - 1.0) / nupdates # Calculate the learning rate lrnow = lr(frac) # Calculate the cliprange cliprangenow = cliprange(frac) # Get minibatch obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632 if eval_env is not None: eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632 epinfobuf.extend(epinfos) if eval_env is not None: eval_epinfobuf.extend(eval_epinfos) # Here what we're going to do is for each minibatch calculate the loss and append it. 
mblossvals = [] if states is None: # nonrecurrent version # Index of each element of batch_size # Create the indices array inds = np.arange(nbatch) for _ in range(noptepochs): # Randomize the indexes np.random.shuffle(inds) # 0 to batch_size with batch_train_size step for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mblossvals.append(model.train(lrnow, cliprangenow, *slices)) else: # recurrent version assert nenvs % nminibatches == 0 envsperbatch = nenvs // nminibatches envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # Feedforward --> get losses --> update lossvals = np.mean(mblossvals, axis=0) # End timer tnow = time.perf_counter() # Calculate the fps (frame per second) fps = int(nbatch / (tnow - tstart)) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, returns) logger.logkv("serial_timesteps", update*nsteps) logger.logkv("nupdates", update) logger.logkv("total_timesteps", update*nbatch) logger.logkv("fps", fps) logger.logkv("explained_variance", float(ev)) logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) if eval_env is not None: logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) ) logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) ) logger.logkv('time_elapsed', tnow - tfirststart) for (lossval, lossname) in zip(lossvals, model.loss_names): logger.logkv(lossname, lossval) if MPI is None or MPI.COMM_WORLD.Get_rank() == 0: logger.dumpkvs() if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0): checkdir = osp.join(logger.get_dir(), 'checkpoints') os.makedirs(checkdir, exist_ok=True) savepath = osp.join(checkdir, '%.5i'%update) print('Saving to', savepath) model.save(savepath) return model
[ "Learn", "policy", "using", "PPO", "algorithm", "(", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1707", ".", "06347", ")" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo2/ppo2.py#L21-L204
[ "def", "learn", "(", "*", ",", "network", ",", "env", ",", "total_timesteps", ",", "eval_env", "=", "None", ",", "seed", "=", "None", ",", "nsteps", "=", "2048", ",", "ent_coef", "=", "0.0", ",", "lr", "=", "3e-4", ",", "vf_coef", "=", "0.5", ",", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
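An end-to-end sketch pairing learn with make_vec_env from baselines/common/cmd_util.py; the environment id, env_type and every hyperparameter value are illustrative, not recommended settings.

from baselines.common.cmd_util import make_vec_env
from baselines.ppo2 import ppo2

venv = make_vec_env('CartPole-v1', 'classic_control', num_env=4, seed=0)

model = ppo2.learn(
    network='mlp',           # standard architecture name resolved by build_policy
    env=venv,
    total_timesteps=50000,
    nsteps=128,              # batch per update = nsteps * num_env = 512
    nminibatches=4,          # 512 / 4 = 128 samples per minibatch
    lr=3e-4,
    log_interval=1,
)
venv.close()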
valid
cg
Demmel p 312
baselines/common/cg.py
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10): """ Demmel p 312 """ p = b.copy() r = b.copy() x = np.zeros_like(b) rdotr = r.dot(r) fmtstr = "%10i %10.3g %10.3g" titlestr = "%10s %10s %10s" if verbose: print(titlestr % ("iter", "residual norm", "soln norm")) for i in range(cg_iters): if callback is not None: callback(x) if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x))) z = f_Ax(p) v = rdotr / p.dot(z) x += v*p r -= v*z newrdotr = r.dot(r) mu = newrdotr/rdotr p = r + mu*p rdotr = newrdotr if rdotr < residual_tol: break if callback is not None: callback(x) if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631 return x
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10): """ Demmel p 312 """ p = b.copy() r = b.copy() x = np.zeros_like(b) rdotr = r.dot(r) fmtstr = "%10i %10.3g %10.3g" titlestr = "%10s %10s %10s" if verbose: print(titlestr % ("iter", "residual norm", "soln norm")) for i in range(cg_iters): if callback is not None: callback(x) if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x))) z = f_Ax(p) v = rdotr / p.dot(z) x += v*p r -= v*z newrdotr = r.dot(r) mu = newrdotr/rdotr p = r + mu*p rdotr = newrdotr if rdotr < residual_tol: break if callback is not None: callback(x) if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631 return x
[ "Demmel", "p", "312" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cg.py#L2-L34
[ "def", "cg", "(", "f_Ax", ",", "b", ",", "cg_iters", "=", "10", ",", "callback", "=", "None", ",", "verbose", "=", "False", ",", "residual_tol", "=", "1e-10", ")", ":", "p", "=", "b", ".", "copy", "(", ")", "r", "=", "b", ".", "copy", "(", ")...
3301089b48c42b87b396e246ea3f56fa4bfc9678
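A small self-contained check of the conjugate-gradient routine on a 2x2 symmetric positive-definite system; the matrix and right-hand side are arbitrary test values.

import numpy as np
from baselines.common.cg import cg

A = np.array([[4., 1.],
              [1., 3.]])               # symmetric positive definite
b = np.array([1., 2.])

x = cg(lambda p: A.dot(p), b, cg_iters=10)
assert np.allclose(A.dot(x), b, atol=1e-6)   # CG solves A x = b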
valid
observation_placeholder
Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor
baselines/common/input.py
def observation_placeholder(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor ''' assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \ 'Can only deal with Discrete and Box observation spaces for now' dtype = ob_space.dtype if dtype == np.int8: dtype = np.uint8 return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name)
def observation_placeholder(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor ''' assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \ 'Can only deal with Discrete and Box observation spaces for now' dtype = ob_space.dtype if dtype == np.int8: dtype = np.uint8 return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name)
[ "Create", "placeholder", "to", "feed", "observations", "into", "of", "the", "size", "appropriate", "to", "the", "observation", "space" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L5-L31
[ "def", "observation_placeholder", "(", "ob_space", ",", "batch_size", "=", "None", ",", "name", "=", "'Ob'", ")", ":", "assert", "isinstance", "(", "ob_space", ",", "Discrete", ")", "or", "isinstance", "(", "ob_space", ",", "Box", ")", "or", "isinstance", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
observation_input
Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type.
baselines/common/input.py
def observation_input(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type. ''' placeholder = observation_placeholder(ob_space, batch_size, name) return placeholder, encode_observation(ob_space, placeholder)
def observation_input(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type. ''' placeholder = observation_placeholder(ob_space, batch_size, name) return placeholder, encode_observation(ob_space, placeholder)
[ "Create", "placeholder", "to", "feed", "observations", "into", "of", "the", "size", "appropriate", "to", "the", "observation", "space", "and", "add", "input", "encoder", "of", "the", "appropriate", "type", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L34-L41
[ "def", "observation_input", "(", "ob_space", ",", "batch_size", "=", "None", ",", "name", "=", "'Ob'", ")", ":", "placeholder", "=", "observation_placeholder", "(", "ob_space", ",", "batch_size", ",", "name", ")", "return", "placeholder", ",", "encode_observatio...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
encode_observation
Encode input in the way that is appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space placeholder: tf.placeholder observation input placeholder
baselines/common/input.py
def encode_observation(ob_space, placeholder): ''' Encode input in the way that is appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space placeholder: tf.placeholder observation input placeholder ''' if isinstance(ob_space, Discrete): return tf.to_float(tf.one_hot(placeholder, ob_space.n)) elif isinstance(ob_space, Box): return tf.to_float(placeholder) elif isinstance(ob_space, MultiDiscrete): placeholder = tf.cast(placeholder, tf.int32) one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])] return tf.concat(one_hots, axis=-1) else: raise NotImplementedError
def encode_observation(ob_space, placeholder): ''' Encode input in the way that is appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space placeholder: tf.placeholder observation input placeholder ''' if isinstance(ob_space, Discrete): return tf.to_float(tf.one_hot(placeholder, ob_space.n)) elif isinstance(ob_space, Box): return tf.to_float(placeholder) elif isinstance(ob_space, MultiDiscrete): placeholder = tf.cast(placeholder, tf.int32) one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])] return tf.concat(one_hots, axis=-1) else: raise NotImplementedError
[ "Encode", "input", "in", "the", "way", "that", "is", "appropriate", "to", "the", "observation", "space" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L43-L63
[ "def", "encode_observation", "(", "ob_space", ",", "placeholder", ")", ":", "if", "isinstance", "(", "ob_space", ",", "Discrete", ")", ":", "return", "tf", ".", "to_float", "(", "tf", ".", "one_hot", "(", "placeholder", ",", "ob_space", ".", "n", ")", ")...
3301089b48c42b87b396e246ea3f56fa4bfc9678
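A TF1-style sketch tying observation_placeholder and encode_observation together for a Discrete space; the space size and fed values are arbitrary.

import numpy as np
import tensorflow as tf
from gym.spaces import Discrete
from baselines.common.input import observation_placeholder, encode_observation

ob_space = Discrete(5)
ob_ph = observation_placeholder(ob_space)       # integer placeholder of shape (None,)
encoded = encode_observation(ob_space, ob_ph)   # one-hot float tensor of shape (None, 5)

with tf.Session() as sess:
    print(sess.run(encoded, {ob_ph: np.array([0, 3])}))
    # [[1. 0. 0. 0. 0.]
    #  [0. 0. 0. 1. 0.]]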
valid
RolloutWorker.generate_rollouts
Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current policy acting on it accordingly.
baselines/her/rollout.py
def generate_rollouts(self): """Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current policy acting on it accordingly. """ self.reset_all_rollouts() # compute observations o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals o[:] = self.initial_o ag[:] = self.initial_ag # generate episodes obs, achieved_goals, acts, goals, successes = [], [], [], [], [] dones = [] info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys] Qs = [] for t in range(self.T): policy_output = self.policy.get_actions( o, ag, self.g, compute_Q=self.compute_Q, noise_eps=self.noise_eps if not self.exploit else 0., random_eps=self.random_eps if not self.exploit else 0., use_target_net=self.use_target_net) if self.compute_Q: u, Q = policy_output Qs.append(Q) else: u = policy_output if u.ndim == 1: # The non-batched case should still have a reasonable shape. u = u.reshape(1, -1) o_new = np.empty((self.rollout_batch_size, self.dims['o'])) ag_new = np.empty((self.rollout_batch_size, self.dims['g'])) success = np.zeros(self.rollout_batch_size) # compute new states and observations obs_dict_new, _, done, info = self.venv.step(u) o_new = obs_dict_new['observation'] ag_new = obs_dict_new['achieved_goal'] success = np.array([i.get('is_success', 0.0) for i in info]) if any(done): # here we assume all environments are done is ~same number of steps, so we terminate rollouts whenever any of the envs returns done # trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations # after a reset break for i, info_dict in enumerate(info): for idx, key in enumerate(self.info_keys): info_values[idx][t, i] = info[i][key] if np.isnan(o_new).any(): self.logger.warn('NaN caught during rollout generation. Trying again...') self.reset_all_rollouts() return self.generate_rollouts() dones.append(done) obs.append(o.copy()) achieved_goals.append(ag.copy()) successes.append(success.copy()) acts.append(u.copy()) goals.append(self.g.copy()) o[...] = o_new ag[...] = ag_new obs.append(o.copy()) achieved_goals.append(ag.copy()) episode = dict(o=obs, u=acts, g=goals, ag=achieved_goals) for key, value in zip(self.info_keys, info_values): episode['info_{}'.format(key)] = value # stats successful = np.array(successes)[-1, :] assert successful.shape == (self.rollout_batch_size,) success_rate = np.mean(successful) self.success_history.append(success_rate) if self.compute_Q: self.Q_history.append(np.mean(Qs)) self.n_episodes += self.rollout_batch_size return convert_episode_to_batch_major(episode)
def generate_rollouts(self): """Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current policy acting on it accordingly. """ self.reset_all_rollouts() # compute observations o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals o[:] = self.initial_o ag[:] = self.initial_ag # generate episodes obs, achieved_goals, acts, goals, successes = [], [], [], [], [] dones = [] info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys] Qs = [] for t in range(self.T): policy_output = self.policy.get_actions( o, ag, self.g, compute_Q=self.compute_Q, noise_eps=self.noise_eps if not self.exploit else 0., random_eps=self.random_eps if not self.exploit else 0., use_target_net=self.use_target_net) if self.compute_Q: u, Q = policy_output Qs.append(Q) else: u = policy_output if u.ndim == 1: # The non-batched case should still have a reasonable shape. u = u.reshape(1, -1) o_new = np.empty((self.rollout_batch_size, self.dims['o'])) ag_new = np.empty((self.rollout_batch_size, self.dims['g'])) success = np.zeros(self.rollout_batch_size) # compute new states and observations obs_dict_new, _, done, info = self.venv.step(u) o_new = obs_dict_new['observation'] ag_new = obs_dict_new['achieved_goal'] success = np.array([i.get('is_success', 0.0) for i in info]) if any(done): # here we assume all environments are done is ~same number of steps, so we terminate rollouts whenever any of the envs returns done # trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations # after a reset break for i, info_dict in enumerate(info): for idx, key in enumerate(self.info_keys): info_values[idx][t, i] = info[i][key] if np.isnan(o_new).any(): self.logger.warn('NaN caught during rollout generation. Trying again...') self.reset_all_rollouts() return self.generate_rollouts() dones.append(done) obs.append(o.copy()) achieved_goals.append(ag.copy()) successes.append(success.copy()) acts.append(u.copy()) goals.append(self.g.copy()) o[...] = o_new ag[...] = ag_new obs.append(o.copy()) achieved_goals.append(ag.copy()) episode = dict(o=obs, u=acts, g=goals, ag=achieved_goals) for key, value in zip(self.info_keys, info_values): episode['info_{}'.format(key)] = value # stats successful = np.array(successes)[-1, :] assert successful.shape == (self.rollout_batch_size,) success_rate = np.mean(successful) self.success_history.append(success_rate) if self.compute_Q: self.Q_history.append(np.mean(Qs)) self.n_episodes += self.rollout_batch_size return convert_episode_to_batch_major(episode)
[ "Performs", "rollout_batch_size", "rollouts", "in", "parallel", "for", "time", "horizon", "T", "with", "the", "current", "policy", "acting", "on", "it", "accordingly", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L51-L137
[ "def", "generate_rollouts", "(", "self", ")", ":", "self", ".", "reset_all_rollouts", "(", ")", "# compute observations", "o", "=", "np", ".", "empty", "(", "(", "self", ".", "rollout_batch_size", ",", "self", ".", "dims", "[", "'o'", "]", ")", ",", "np"...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
RolloutWorker.save_policy
Pickles the current policy for later inspection.
baselines/her/rollout.py
def save_policy(self, path): """Pickles the current policy for later inspection. """ with open(path, 'wb') as f: pickle.dump(self.policy, f)
def save_policy(self, path): """Pickles the current policy for later inspection. """ with open(path, 'wb') as f: pickle.dump(self.policy, f)
[ "Pickles", "the", "current", "policy", "for", "later", "inspection", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L151-L155
[ "def", "save_policy", "(", "self", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "self", ".", "policy", ",", "f", ")" ]
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
RolloutWorker.logs
Generates a dictionary that contains all collected statistics.
baselines/her/rollout.py
def logs(self, prefix='worker'): """Generates a dictionary that contains all collected statistics. """ logs = [] logs += [('success_rate', np.mean(self.success_history))] if self.compute_Q: logs += [('mean_Q', np.mean(self.Q_history))] logs += [('episode', self.n_episodes)] if prefix != '' and not prefix.endswith('/'): return [(prefix + '/' + key, val) for key, val in logs] else: return logs
def logs(self, prefix='worker'): """Generates a dictionary that contains all collected statistics. """ logs = [] logs += [('success_rate', np.mean(self.success_history))] if self.compute_Q: logs += [('mean_Q', np.mean(self.Q_history))] logs += [('episode', self.n_episodes)] if prefix != '' and not prefix.endswith('/'): return [(prefix + '/' + key, val) for key, val in logs] else: return logs
[ "Generates", "a", "dictionary", "that", "contains", "all", "collected", "statistics", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L157-L169
[ "def", "logs", "(", "self", ",", "prefix", "=", "'worker'", ")", ":", "logs", "=", "[", "]", "logs", "+=", "[", "(", "'success_rate'", ",", "np", ".", "mean", "(", "self", ".", "success_history", ")", ")", "]", "if", "self", ".", "compute_Q", ":", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
smooth
Smooth signal y, where radius determines the size of the window. mode='two_sided': average over the window [max(index - radius, 0), min(index + radius, len(y)-1)] mode='causal': average over the window [max(index - radius, 0), index] valid_only: put NaN in entries where the full-sized window is not available
baselines/common/plot_util.py
def smooth(y, radius, mode='two_sided', valid_only=False): ''' Smooth signal y, where radius is determines the size of the window mode='twosided': average over the window [max(index - radius, 0), min(index + radius, len(y)-1)] mode='causal': average over the window [max(index - radius, 0), index] valid_only: put nan in entries where the full-sized window is not available ''' assert mode in ('two_sided', 'causal') if len(y) < 2*radius+1: return np.ones_like(y) * y.mean() elif mode == 'two_sided': convkernel = np.ones(2 * radius+1) out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same') if valid_only: out[:radius] = out[-radius:] = np.nan elif mode == 'causal': convkernel = np.ones(radius) out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full') out = out[:-radius+1] if valid_only: out[:radius] = np.nan return out
def smooth(y, radius, mode='two_sided', valid_only=False): ''' Smooth signal y, where radius is determines the size of the window mode='twosided': average over the window [max(index - radius, 0), min(index + radius, len(y)-1)] mode='causal': average over the window [max(index - radius, 0), index] valid_only: put nan in entries where the full-sized window is not available ''' assert mode in ('two_sided', 'causal') if len(y) < 2*radius+1: return np.ones_like(y) * y.mean() elif mode == 'two_sided': convkernel = np.ones(2 * radius+1) out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same') if valid_only: out[:radius] = out[-radius:] = np.nan elif mode == 'causal': convkernel = np.ones(radius) out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full') out = out[:-radius+1] if valid_only: out[:radius] = np.nan return out
[ "Smooth", "signal", "y", "where", "radius", "is", "determines", "the", "size", "of", "the", "window" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L11-L37
[ "def", "smooth", "(", "y", ",", "radius", ",", "mode", "=", "'two_sided'", ",", "valid_only", "=", "False", ")", ":", "assert", "mode", "in", "(", "'two_sided'", ",", "'causal'", ")", "if", "len", "(", "y", ")", "<", "2", "*", "radius", "+", "1", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
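A short sketch of the three smoothing modes on a noisy sine; the signal length and radius are arbitrary.

import numpy as np
from baselines.common.plot_util import smooth

y = np.sin(np.linspace(0, 10, 200)) + 0.3 * np.random.randn(200)

y_two_sided = smooth(y, radius=10)                    # centered window average
y_causal    = smooth(y, radius=10, mode='causal')     # uses only past values
y_valid     = smooth(y, radius=10, valid_only=True)   # NaN where the full window is unavailable

assert y_two_sided.shape == y.shape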
valid
one_sided_ema
perform one-sided (causal) EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array or list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals xolds[0] high: float - max value of the new x grid. By default equals xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple xs, ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid
baselines/common/plot_util.py
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform one-sided (causal) EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' low = xolds[0] if low is None else low high = xolds[-1] if high is None else high assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0]) assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1]) assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds)) xolds = xolds.astype('float64') yolds = yolds.astype('float64') luoi = 0 # last unused old index sum_y = 0. count_y = 0. xnews = np.linspace(low, high, n) decay_period = (high - low) / (n - 1) * decay_steps interstep_decay = np.exp(- 1. / decay_steps) sum_ys = np.zeros_like(xnews) count_ys = np.zeros_like(xnews) for i in range(n): xnew = xnews[i] sum_y *= interstep_decay count_y *= interstep_decay while True: xold = xolds[luoi] if xold <= xnew: decay = np.exp(- (xnew - xold) / decay_period) sum_y += decay * yolds[luoi] count_y += decay luoi += 1 else: break if luoi >= len(xolds): break sum_ys[i] = sum_y count_ys[i] = count_y ys = sum_ys / count_ys ys[count_ys < low_counts_threshold] = np.nan return xnews, ys, count_ys
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform one-sided (causal) EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' low = xolds[0] if low is None else low high = xolds[-1] if high is None else high assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0]) assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1]) assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds)) xolds = xolds.astype('float64') yolds = yolds.astype('float64') luoi = 0 # last unused old index sum_y = 0. count_y = 0. xnews = np.linspace(low, high, n) decay_period = (high - low) / (n - 1) * decay_steps interstep_decay = np.exp(- 1. / decay_steps) sum_ys = np.zeros_like(xnews) count_ys = np.zeros_like(xnews) for i in range(n): xnew = xnews[i] sum_y *= interstep_decay count_y *= interstep_decay while True: xold = xolds[luoi] if xold <= xnew: decay = np.exp(- (xnew - xold) / decay_period) sum_y += decay * yolds[luoi] count_y += decay luoi += 1 else: break if luoi >= len(xolds): break sum_ys[i] = sum_y count_ys[i] = count_y ys = sum_ys / count_ys ys[count_ys < low_counts_threshold] = np.nan return xnews, ys, count_ys
[ "perform", "one", "-", "sided", "(", "causal", ")", "EMA", "(", "exponential", "moving", "average", ")", "smoothing", "and", "resampling", "to", "an", "even", "grid", "with", "n", "points", ".", "Does", "not", "do", "extrapolation", "so", "we", "assume", ...
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L39-L109
[ "def", "one_sided_ema", "(", "xolds", ",", "yolds", ",", "low", "=", "None", ",", "high", "=", "None", ",", "n", "=", "512", ",", "decay_steps", "=", "1.", ",", "low_counts_threshold", "=", "1e-8", ")", ":", "low", "=", "xolds", "[", "0", "]", "if"...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
symmetric_ema
perform symmetric EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array or list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals xolds[0] high: float - max value of the new x grid. By default equals xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple xs, ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid
baselines/common/plot_util.py
def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform symmetric EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0) _, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0) ys2 = ys2[::-1] count_ys2 = count_ys2[::-1] count_ys = count_ys1 + count_ys2 ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys ys[count_ys < low_counts_threshold] = np.nan return xs, ys, count_ys
def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform symmetric EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0) _, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0) ys2 = ys2[::-1] count_ys2 = count_ys2[::-1] count_ys = count_ys1 + count_ys2 ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys ys[count_ys < low_counts_threshold] = np.nan return xs, ys, count_ys
[ "perform", "symmetric", "EMA", "(", "exponential", "moving", "average", ")", "smoothing", "and", "resampling", "to", "an", "even", "grid", "with", "n", "points", ".", "Does", "not", "do", "extrapolation", "so", "we", "assume", "xolds", "[", "0", "]", "<", ...
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L111-L147
[ "def", "symmetric_ema", "(", "xolds", ",", "yolds", ",", "low", "=", "None", ",", "high", "=", "None", ",", "n", "=", "512", ",", "decay_steps", "=", "1.", ",", "low_counts_threshold", "=", "1e-8", ")", ":", "xs", ",", "ys1", ",", "count_ys1", "=", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
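A short sketch comparing the causal and the symmetric smoother on the same noisy step function. It assumes numpy and the two functions from baselines.common.plot_util shown above; low and high are passed explicitly because symmetric_ema negates them when running the reversed pass.

import numpy as np
from baselines.common.plot_util import one_sided_ema, symmetric_ema  # import path per the records above

rng = np.random.RandomState(1)
x = np.linspace(0.0, 5.0, 300)
y = np.where(x < 2.5, 0.0, 1.0) + 0.2 * rng.randn(300)   # noisy step at x = 2.5
xs, y_causal, _ = one_sided_ema(x, y, x[0], x[-1], n=128, decay_steps=8.0)
xs, y_sym, _ = symmetric_ema(x, y, x[0], x[-1], n=128, decay_steps=8.0)
mid = 64  # grid index just past the step
# The causal estimate typically still sits near the pre-step level here, while the
# symmetric estimate, which averages a forward and a backward pass, sits near 0.5.
print(float(y_causal[mid]), float(y_sym[mid]))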
valid
load_results
load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out the list of directories from which the data is loaded. Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in the metadata.json file) - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file
baselines/common/plot_util.py
def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False): ''' load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file ''' import re if isinstance(root_dir_or_dirs, str): rootdirs = [osp.expanduser(root_dir_or_dirs)] else: rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs] allresults = [] for rootdir in rootdirs: assert osp.exists(rootdir), "%s doesn't exist"%rootdir for dirname, dirs, files in os.walk(rootdir): if '-proc' in dirname: files[:] = [] continue monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv') if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \ any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv # used to be uncommented, which means do not go deeper than current directory if any of the data files # are found # dirs[:] = [] result = {'dirname' : dirname} if "metadata.json" in files: with open(osp.join(dirname, "metadata.json"), "r") as fh: result['metadata'] = json.load(fh) progjson = osp.join(dirname, "progress.json") progcsv = osp.join(dirname, "progress.csv") if enable_progress: if osp.exists(progjson): result['progress'] = pandas.DataFrame(read_json(progjson)) elif osp.exists(progcsv): try: result['progress'] = read_csv(progcsv) except pandas.errors.EmptyDataError: print('skipping progress file in ', dirname, 'empty data') else: if verbose: print('skipping %s: no progress file'%dirname) if enable_monitor: try: result['monitor'] = pandas.DataFrame(monitor.load_results(dirname)) except monitor.LoadMonitorResultsError: print('skipping %s: no monitor files'%dirname) except Exception as e: print('exception loading monitor file in %s: %s'%(dirname, e)) if result.get('monitor') is not None or result.get('progress') is not None: allresults.append(Result(**result)) if verbose: print('successfully loaded %s'%dirname) if verbose: print('loaded %i results'%len(allresults)) return allresults
def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False): ''' load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file ''' import re if isinstance(root_dir_or_dirs, str): rootdirs = [osp.expanduser(root_dir_or_dirs)] else: rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs] allresults = [] for rootdir in rootdirs: assert osp.exists(rootdir), "%s doesn't exist"%rootdir for dirname, dirs, files in os.walk(rootdir): if '-proc' in dirname: files[:] = [] continue monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv') if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \ any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv # used to be uncommented, which means do not go deeper than current directory if any of the data files # are found # dirs[:] = [] result = {'dirname' : dirname} if "metadata.json" in files: with open(osp.join(dirname, "metadata.json"), "r") as fh: result['metadata'] = json.load(fh) progjson = osp.join(dirname, "progress.json") progcsv = osp.join(dirname, "progress.csv") if enable_progress: if osp.exists(progjson): result['progress'] = pandas.DataFrame(read_json(progjson)) elif osp.exists(progcsv): try: result['progress'] = read_csv(progcsv) except pandas.errors.EmptyDataError: print('skipping progress file in ', dirname, 'empty data') else: if verbose: print('skipping %s: no progress file'%dirname) if enable_monitor: try: result['monitor'] = pandas.DataFrame(monitor.load_results(dirname)) except monitor.LoadMonitorResultsError: print('skipping %s: no monitor files'%dirname) except Exception as e: print('exception loading monitor file in %s: %s'%(dirname, e)) if result.get('monitor') is not None or result.get('progress') is not None: allresults.append(Result(**result)) if verbose: print('successfully loaded %s'%dirname) if verbose: print('loaded %i results'%len(allresults)) return allresults
[ "load", "summaries", "of", "runs", "from", "a", "list", "of", "directories", "(", "including", "subdirectories", ")", "Arguments", ":" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L152-L220
[ "def", "load_results", "(", "root_dir_or_dirs", ",", "enable_progress", "=", "True", ",", "enable_monitor", "=", "True", ",", "verbose", "=", "False", ")", ":", "import", "re", "if", "isinstance", "(", "root_dir_or_dirs", ",", "str", ")", ":", "rootdirs", "=...
3301089b48c42b87b396e246ea3f56fa4bfc9678
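A hedged usage sketch for load_results. The log directory path is hypothetical, and the assumption is that it was produced by baselines' logger and/or the Monitor wrapper; fields of a Result that were not found on disk are read defensively here in case they are left unset.

from baselines.common import plot_util as pu

results = pu.load_results('~/logs/my_experiment', verbose=True)  # hypothetical log root
for r in results:
    print(r.dirname)
    progress = getattr(r, 'progress', None)   # pandas DataFrame of progress.csv, if present
    if progress is not None:
        print(progress.columns.tolist())      # columns logged via logger.logkv during training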
valid
plot_results
Plot multiple Results objects xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values. By default, x is cumsum of episode lengths, and y is episode rewards split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by. That is, the results r for which split_fn(r) is different will be put on different sub-panels. By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are stacked vertically in the figure. group_fn: function Result -> hashable - function that converts results objects into keys to group curves by. That is, the results r for which group_fn(r) is the same will be put into the same group. Curves in the same group have the same color (if average_group is False), or averaged over (if average_group is True). The default value is the same as default value for split_fn average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling (if resample = 0, will use 512 steps) shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be shown (only applicable if average_group = True) shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves (that is, standard deviation divided by square root of number of curves) will be shown (only applicable if average_group = True) figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of sub-panels. legend_outside: bool - if True, will place the legend outside of the sub-panels. resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric EMA smoothing (see the docstring for symmetric_ema). Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default value is 512. smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step). See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
baselines/common/plot_util.py
def plot_results( allresults, *, xy_fn=default_xy_fn, split_fn=default_split_fn, group_fn=default_split_fn, average_group=False, shaded_std=True, shaded_err=True, figsize=None, legend_outside=False, resample=0, smooth_step=1.0 ): ''' Plot multiple Results objects xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values. By default, x is cumsum of episode lengths, and y is episode rewards split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by. That is, the results r for which split_fn(r) is different will be put on different sub-panels. By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are stacked vertically in the figure. group_fn: function Result -> hashable - function that converts results objects into keys to group curves by. That is, the results r for which group_fn(r) is the same will be put into the same group. Curves in the same group have the same color (if average_group is False), or averaged over (if average_group is True). The default value is the same as default value for split_fn average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling (if resample = 0, will use 512 steps) shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be shown (only applicable if average_group = True) shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves (that is, standard deviation divided by square root of number of curves) will be shown (only applicable if average_group = True) figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of sub-panels. legend_outside: bool - if True, will place the legend outside of the sub-panels. resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric EMA smoothing (see the docstring for symmetric_ema). Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default value is 512. smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step). See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. ''' if split_fn is None: split_fn = lambda _ : '' if group_fn is None: group_fn = lambda _ : '' sk2r = defaultdict(list) # splitkey2results for result in allresults: splitkey = split_fn(result) sk2r[splitkey].append(result) assert len(sk2r) > 0 assert isinstance(resample, int), "0: don't resample. 
<integer>: that many samples" nrows = len(sk2r) ncols = 1 figsize = figsize or (6, 6 * nrows) f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize) groups = list(set(group_fn(result) for result in allresults)) default_samples = 512 if average_group: resample = resample or default_samples for (isplit, sk) in enumerate(sorted(sk2r.keys())): g2l = {} g2c = defaultdict(int) sresults = sk2r[sk] gresults = defaultdict(list) ax = axarr[isplit][0] for result in sresults: group = group_fn(result) g2c[group] += 1 x, y = xy_fn(result) if x is None: x = np.arange(len(y)) x, y = map(np.asarray, (x, y)) if average_group: gresults[group].append((x,y)) else: if resample: x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step) l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)]) g2l[group] = l if average_group: for group in sorted(groups): xys = gresults[group] if not any(xys): continue color = COLORS[groups.index(group) % len(COLORS)] origxs = [xy[0] for xy in xys] minxlen = min(map(len, origxs)) def allequal(qs): return all((q==qs[0]).all() for q in qs[1:]) if resample: low = max(x[0] for x in origxs) high = min(x[-1] for x in origxs) usex = np.linspace(low, high, resample) ys = [] for (x, y) in xys: ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1]) else: assert allequal([x[:minxlen] for x in origxs]),\ 'If you want to average unevenly sampled data, set resample=<number of samples you want>' usex = origxs[0] ys = [xy[1][:minxlen] for xy in xys] ymean = np.mean(ys, axis=0) ystd = np.std(ys, axis=0) ystderr = ystd / np.sqrt(len(ys)) l, = axarr[isplit][0].plot(usex, ymean, color=color) g2l[group] = l if shaded_err: ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4) if shaded_std: ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2) # https://matplotlib.org/users/legend_guide.html plt.tight_layout() if any(g2l.keys()): ax.legend( g2l.values(), ['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(), loc=2 if legend_outside else None, bbox_to_anchor=(1,1) if legend_outside else None) ax.set_title(sk) return f, axarr
def plot_results( allresults, *, xy_fn=default_xy_fn, split_fn=default_split_fn, group_fn=default_split_fn, average_group=False, shaded_std=True, shaded_err=True, figsize=None, legend_outside=False, resample=0, smooth_step=1.0 ): ''' Plot multiple Results objects xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values. By default, x is cumsum of episode lengths, and y is episode rewards split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by. That is, the results r for which split_fn(r) is different will be put on different sub-panels. By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are stacked vertically in the figure. group_fn: function Result -> hashable - function that converts results objects into keys to group curves by. That is, the results r for which group_fn(r) is the same will be put into the same group. Curves in the same group have the same color (if average_group is False), or averaged over (if average_group is True). The default value is the same as default value for split_fn average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling (if resample = 0, will use 512 steps) shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be shown (only applicable if average_group = True) shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves (that is, standard deviation divided by square root of number of curves) will be shown (only applicable if average_group = True) figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of sub-panels. legend_outside: bool - if True, will place the legend outside of the sub-panels. resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric EMA smoothing (see the docstring for symmetric_ema). Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default value is 512. smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step). See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. ''' if split_fn is None: split_fn = lambda _ : '' if group_fn is None: group_fn = lambda _ : '' sk2r = defaultdict(list) # splitkey2results for result in allresults: splitkey = split_fn(result) sk2r[splitkey].append(result) assert len(sk2r) > 0 assert isinstance(resample, int), "0: don't resample. 
<integer>: that many samples" nrows = len(sk2r) ncols = 1 figsize = figsize or (6, 6 * nrows) f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize) groups = list(set(group_fn(result) for result in allresults)) default_samples = 512 if average_group: resample = resample or default_samples for (isplit, sk) in enumerate(sorted(sk2r.keys())): g2l = {} g2c = defaultdict(int) sresults = sk2r[sk] gresults = defaultdict(list) ax = axarr[isplit][0] for result in sresults: group = group_fn(result) g2c[group] += 1 x, y = xy_fn(result) if x is None: x = np.arange(len(y)) x, y = map(np.asarray, (x, y)) if average_group: gresults[group].append((x,y)) else: if resample: x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step) l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)]) g2l[group] = l if average_group: for group in sorted(groups): xys = gresults[group] if not any(xys): continue color = COLORS[groups.index(group) % len(COLORS)] origxs = [xy[0] for xy in xys] minxlen = min(map(len, origxs)) def allequal(qs): return all((q==qs[0]).all() for q in qs[1:]) if resample: low = max(x[0] for x in origxs) high = min(x[-1] for x in origxs) usex = np.linspace(low, high, resample) ys = [] for (x, y) in xys: ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1]) else: assert allequal([x[:minxlen] for x in origxs]),\ 'If you want to average unevenly sampled data, set resample=<number of samples you want>' usex = origxs[0] ys = [xy[1][:minxlen] for xy in xys] ymean = np.mean(ys, axis=0) ystd = np.std(ys, axis=0) ystderr = ystd / np.sqrt(len(ys)) l, = axarr[isplit][0].plot(usex, ymean, color=color) g2l[group] = l if shaded_err: ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4) if shaded_std: ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2) # https://matplotlib.org/users/legend_guide.html plt.tight_layout() if any(g2l.keys()): ax.legend( g2l.values(), ['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(), loc=2 if legend_outside else None, bbox_to_anchor=(1,1) if legend_outside else None) ax.set_title(sk) return f, axarr
[ "Plot", "multiple", "Results", "objects" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L240-L375
[ "def", "plot_results", "(", "allresults", ",", "*", ",", "xy_fn", "=", "default_xy_fn", ",", "split_fn", "=", "default_split_fn", ",", "group_fn", "=", "default_split_fn", ",", "average_group", "=", "False", ",", "shaded_std", "=", "True", ",", "shaded_err", "...
3301089b48c42b87b396e246ea3f56fa4bfc9678
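A hedged end-to-end sketch combining load_results and plot_results: load all runs under a hypothetical log root, plot episode reward against cumulative timesteps, and average curves that share a group key. It assumes the monitor DataFrame has the 'l' (episode length) and 'r' (episode reward) columns written by baselines' Monitor wrapper.

import numpy as np
import matplotlib.pyplot as plt
from baselines.common import plot_util as pu

results = pu.load_results('~/logs')           # hypothetical log root
f, axarr = pu.plot_results(
    results,
    xy_fn=lambda r: (np.cumsum(r.monitor.l), r.monitor.r),  # x = total timesteps, y = episode reward
    average_group=True,    # average curves with the same group key; forces resampling
    shaded_std=False,      # keep only the standard-error band
    resample=512,          # resample onto a 512-point grid via symmetric EMA
)
plt.show()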
valid
check_synced
It's common to forget to initialize your variables to the same values, or (less commonly), if you update them in some way other than Adam, to let them get out of sync. This function checks that variables on all MPI workers are the same, and raises an AssertionError otherwise. Arguments: comm: MPI communicator localval: list of local variables (list of variables on the current worker to be compared with the other workers)
baselines/common/mpi_adam_optimizer.py
def check_synced(localval, comm=None): """ It's common to forget to initialize your variables to the same values, or (less commonly) if you update them in some other way than adam, to get them out of sync. This function checks that variables on all MPI workers are the same, and raises an AssertionError otherwise Arguments: comm: MPI communicator localval: list of local variables (list of variables on current worker to be compared with the other workers) """ comm = comm or MPI.COMM_WORLD vals = comm.gather(localval) if comm.rank == 0: assert all(val==vals[0] for val in vals[1:])
def check_synced(localval, comm=None): """ It's common to forget to initialize your variables to the same values, or (less commonly) if you update them in some other way than adam, to get them out of sync. This function checks that variables on all MPI workers are the same, and raises an AssertionError otherwise Arguments: comm: MPI communicator localval: list of local variables (list of variables on current worker to be compared with the other workers) """ comm = comm or MPI.COMM_WORLD vals = comm.gather(localval) if comm.rank == 0: assert all(val==vals[0] for val in vals[1:])
[ "It", "s", "common", "to", "forget", "to", "initialize", "your", "variables", "to", "the", "same", "values", "or", "(", "less", "commonly", ")", "if", "you", "update", "them", "in", "some", "other", "way", "than", "adam", "to", "get", "them", "out", "o...
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_adam_optimizer.py#L40-L54
[ "def", "check_synced", "(", "localval", ",", "comm", "=", "None", ")", ":", "comm", "=", "comm", "or", "MPI", ".", "COMM_WORLD", "vals", "=", "comm", ".", "gather", "(", "localval", ")", "if", "comm", ".", "rank", "==", "0", ":", "assert", "all", "...
3301089b48c42b87b396e246ea3f56fa4bfc9678
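A small sketch of how check_synced can be used to verify that model parameters agree across workers. It assumes mpi4py is installed and that the script is launched under mpirun; the flattened parameter vector here is a stand-in for the real model parameters.

import numpy as np
from mpi4py import MPI
from baselines.common.mpi_adam_optimizer import check_synced

# Stand-in for the local worker's flattened model parameters.
local_params = np.ones(10, dtype=np.float64)
# Rank 0 gathers this scalar from every worker and asserts they are all equal.
check_synced(float(local_params.sum()), comm=MPI.COMM_WORLD)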
valid
copy_obs_dict
Deep-copy an observation dict.
baselines/common/vec_env/util.py
def copy_obs_dict(obs): """ Deep-copy an observation dict. """ return {k: np.copy(v) for k, v in obs.items()}
def copy_obs_dict(obs): """ Deep-copy an observation dict. """ return {k: np.copy(v) for k, v in obs.items()}
[ "Deep", "-", "copy", "an", "observation", "dict", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/util.py#L11-L15
[ "def", "copy_obs_dict", "(", "obs", ")", ":", "return", "{", "k", ":", "np", ".", "copy", "(", "v", ")", "for", "k", ",", "v", "in", "obs", ".", "items", "(", ")", "}" ]
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
obs_space_info
Get dict-structured information about a gym.Space. Returns: A tuple (keys, shapes, dtypes): keys: a list of dict keys. shapes: a dict mapping keys to shapes. dtypes: a dict mapping keys to dtypes.
baselines/common/vec_env/util.py
def obs_space_info(obs_space): """ Get dict-structured information about a gym.Space. Returns: A tuple (keys, shapes, dtypes): keys: a list of dict keys. shapes: a dict mapping keys to shapes. dtypes: a dict mapping keys to dtypes. """ if isinstance(obs_space, gym.spaces.Dict): assert isinstance(obs_space.spaces, OrderedDict) subspaces = obs_space.spaces else: subspaces = {None: obs_space} keys = [] shapes = {} dtypes = {} for key, box in subspaces.items(): keys.append(key) shapes[key] = box.shape dtypes[key] = box.dtype return keys, shapes, dtypes
def obs_space_info(obs_space): """ Get dict-structured information about a gym.Space. Returns: A tuple (keys, shapes, dtypes): keys: a list of dict keys. shapes: a dict mapping keys to shapes. dtypes: a dict mapping keys to dtypes. """ if isinstance(obs_space, gym.spaces.Dict): assert isinstance(obs_space.spaces, OrderedDict) subspaces = obs_space.spaces else: subspaces = {None: obs_space} keys = [] shapes = {} dtypes = {} for key, box in subspaces.items(): keys.append(key) shapes[key] = box.shape dtypes[key] = box.dtype return keys, shapes, dtypes
[ "Get", "dict", "-", "structured", "information", "about", "a", "gym", ".", "Space", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/util.py#L28-L50
[ "def", "obs_space_info", "(", "obs_space", ")", ":", "if", "isinstance", "(", "obs_space", ",", "gym", ".", "spaces", ".", "Dict", ")", ":", "assert", "isinstance", "(", "obs_space", ".", "spaces", ",", "OrderedDict", ")", "subspaces", "=", "obs_space", "....
3301089b48c42b87b396e246ea3f56fa4bfc9678
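A sketch showing the two cases obs_space_info handles: a Dict observation space and a plain Box space. It assumes a gym version where Dict.spaces is an OrderedDict, which is what the assert in the function above expects.

from collections import OrderedDict
import numpy as np
import gym
from baselines.common.vec_env.util import obs_space_info

dict_space = gym.spaces.Dict(OrderedDict([
    ('image', gym.spaces.Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)),
    ('vector', gym.spaces.Box(low=-1.0, high=1.0, shape=(5,), dtype=np.float32)),
]))
keys, shapes, dtypes = obs_space_info(dict_space)
print(keys)              # ['image', 'vector']
print(shapes['image'])   # (84, 84, 3)

box_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
keys, shapes, dtypes = obs_space_info(box_space)
print(keys)              # [None]: non-dict spaces are wrapped under a single None key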
valid
q_retrace
Calculates q_retrace targets :param R: Rewards :param D: Dones :param q_i: Q values for actions taken :param v: V values :param rho_i: Importance weight for each action :return: Q_retrace values
baselines/acer/acer.py
def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma): """ Calculates q_retrace targets :param R: Rewards :param D: Dones :param q_i: Q values for actions taken :param v: V values :param rho_i: Importance weight for each action :return: Q_retrace values """ rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True) # list of len steps, shape [nenvs] rs = batch_to_seq(R, nenvs, nsteps, True) # list of len steps, shape [nenvs] ds = batch_to_seq(D, nenvs, nsteps, True) # list of len steps, shape [nenvs] q_is = batch_to_seq(q_i, nenvs, nsteps, True) vs = batch_to_seq(v, nenvs, nsteps + 1, True) v_final = vs[-1] qret = v_final qrets = [] for i in range(nsteps - 1, -1, -1): check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6) qret = rs[i] + gamma * qret * (1.0 - ds[i]) qrets.append(qret) qret = (rho_bar[i] * (qret - q_is[i])) + vs[i] qrets = qrets[::-1] qret = seq_to_batch(qrets, flat=True) return qret
def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma): """ Calculates q_retrace targets :param R: Rewards :param D: Dones :param q_i: Q values for actions taken :param v: V values :param rho_i: Importance weight for each action :return: Q_retrace values """ rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True) # list of len steps, shape [nenvs] rs = batch_to_seq(R, nenvs, nsteps, True) # list of len steps, shape [nenvs] ds = batch_to_seq(D, nenvs, nsteps, True) # list of len steps, shape [nenvs] q_is = batch_to_seq(q_i, nenvs, nsteps, True) vs = batch_to_seq(v, nenvs, nsteps + 1, True) v_final = vs[-1] qret = v_final qrets = [] for i in range(nsteps - 1, -1, -1): check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6) qret = rs[i] + gamma * qret * (1.0 - ds[i]) qrets.append(qret) qret = (rho_bar[i] * (qret - q_is[i])) + vs[i] qrets = qrets[::-1] qret = seq_to_batch(qrets, flat=True) return qret
[ "Calculates", "q_retrace", "targets" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/acer/acer.py#L25-L51
[ "def", "q_retrace", "(", "R", ",", "D", ",", "q_i", ",", "v", ",", "rho_i", ",", "nenvs", ",", "nsteps", ",", "gamma", ")", ":", "rho_bar", "=", "batch_to_seq", "(", "tf", ".", "minimum", "(", "1.0", ",", "rho_i", ")", ",", "nenvs", ",", "nsteps"...
3301089b48c42b87b396e246ea3f56fa4bfc9678
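The recursion above is easier to audit outside of TensorFlow. Below is a hedged NumPy re-expression of the same Retrace target computation; the helper name q_retrace_np and the [nsteps, nenvs] array layout are illustrative assumptions, not part of baselines (the TF version works on flattened batches via batch_to_seq/seq_to_batch).

import numpy as np

def q_retrace_np(rewards, dones, q_i, vs, rho_i, gamma):
    # rewards, dones, q_i, rho_i: [nsteps, nenvs]; vs: [nsteps + 1, nenvs] (last row is the bootstrap value)
    nsteps, nenvs = rewards.shape
    rho_bar = np.minimum(1.0, rho_i)       # truncated importance weights
    qret = vs[-1]                          # bootstrap from the value of the final state
    qrets = np.zeros((nsteps, nenvs))
    for i in reversed(range(nsteps)):
        qret = rewards[i] + gamma * qret * (1.0 - dones[i])
        qrets[i] = qret
        # correction term: shrink the target towards the critic before stepping back in time
        qret = rho_bar[i] * (qret - q_i[i]) + vs[i]
    return qrets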
valid
learn
Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf) Train an agent with given network architecture on a given environment using ACER. Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) (default: 20) nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension (last image dimension) (default: 4) total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M) q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods) ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01) max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) rprop_alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting factor (default: 0.99) log_interval: int, number of updates between logging events (default: 100) buffer_size: int, size of the replay buffer (default: 50k) replay_ratio: int, how many (on average) batches of data to sample from the replay buffer for each batch taken from the environment (default: 4) replay_start: int, the sampling from the replay buffer does not start until the replay buffer has at least that many samples (default: 10k) c: float, importance weight clipping factor (default: 10) trust_region: bool, whether or not the algorithm estimates the gradient KL divergence between the old and updated policy and uses it to determine step size (default: True) delta: float, max KL divergence between the old policy and updated policy (default: 1) alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99) load_path: str, path to load the model from (default: None) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network. For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
baselines/acer/acer.py
def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01, max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99, log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0, trust_region=True, alpha=0.99, delta=1, load_path=None, **network_kwargs): ''' Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf) Train an agent with given network architecture on a given environment using ACER. Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) (default: 20) nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension (last image dimension) (default: 4) total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M) q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods) ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01) max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10), lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. 
Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) rprop_alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting factor (default: 0.99) log_interval: int, number of updates between logging events (default: 100) buffer_size: int, size of the replay buffer (default: 50k) replay_ratio: int, now many (on average) batches of data to sample from the replay buffer take after batch from the environment (default: 4) replay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k) c: float, importance weight clipping factor (default: 10) trust_region bool, whether or not algorithms estimates the gradient KL divergence between the old and updated policy and uses it to determine step size (default: True) delta: float, max KL divergence between the old policy and updated policy (default: 1) alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99) load_path: str, path to load the model from (default: None) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. ''' print("Running Acer Simple") print(locals()) set_global_seeds(seed) if not isinstance(env, VecFrameStack): env = VecFrameStack(env, 1) policy = build_policy(env, network, estimate_q=True, **network_kwargs) nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space nstack = env.nstack model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, ent_coef=ent_coef, q_coef=q_coef, gamma=gamma, max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, c=c, trust_region=trust_region, alpha=alpha, delta=delta) runner = Runner(env=env, model=model, nsteps=nsteps) if replay_ratio > 0: buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size) else: buffer = None nbatch = nenvs*nsteps acer = Acer(runner, model, buffer, log_interval) acer.tstart = time.time() for acer.steps in range(0, total_timesteps, nbatch): #nbatch samples, 1 on_policy call and multiple off-policy calls acer.call(on_policy=True) if replay_ratio > 0 and buffer.has_atleast(replay_start): n = np.random.poisson(replay_ratio) for _ in range(n): acer.call(on_policy=False) # no simulation steps in this return model
def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01, max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99, log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0, trust_region=True, alpha=0.99, delta=1, load_path=None, **network_kwargs): ''' Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf) Train an agent with given network architecture on a given environment using ACER. Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) (default: 20) nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension (last image dimension) (default: 4) total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M) q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods) ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01) max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10), lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. 
Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) rprop_alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting factor (default: 0.99) log_interval: int, number of updates between logging events (default: 100) buffer_size: int, size of the replay buffer (default: 50k) replay_ratio: int, now many (on average) batches of data to sample from the replay buffer take after batch from the environment (default: 4) replay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k) c: float, importance weight clipping factor (default: 10) trust_region bool, whether or not algorithms estimates the gradient KL divergence between the old and updated policy and uses it to determine step size (default: True) delta: float, max KL divergence between the old policy and updated policy (default: 1) alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99) load_path: str, path to load the model from (default: None) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. ''' print("Running Acer Simple") print(locals()) set_global_seeds(seed) if not isinstance(env, VecFrameStack): env = VecFrameStack(env, 1) policy = build_policy(env, network, estimate_q=True, **network_kwargs) nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space nstack = env.nstack model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, ent_coef=ent_coef, q_coef=q_coef, gamma=gamma, max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, c=c, trust_region=trust_region, alpha=alpha, delta=delta) runner = Runner(env=env, model=model, nsteps=nsteps) if replay_ratio > 0: buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size) else: buffer = None nbatch = nenvs*nsteps acer = Acer(runner, model, buffer, log_interval) acer.tstart = time.time() for acer.steps in range(0, total_timesteps, nbatch): #nbatch samples, 1 on_policy call and multiple off-policy calls acer.call(on_policy=True) if replay_ratio > 0 and buffer.has_atleast(replay_start): n = np.random.poisson(replay_ratio) for _ in range(n): acer.call(on_policy=False) # no simulation steps in this return model
[ "Main", "entrypoint", "for", "ACER", "(", "Actor", "-", "Critic", "with", "Experience", "Replay", ")", "algorithm", "(", "https", ":", "//", "arxiv", ".", "org", "/", "pdf", "/", "1611", ".", "01224", ".", "pdf", ")", "Train", "an", "agent", "with", ...
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/acer/acer.py#L274-L377
[ "def", "learn", "(", "network", ",", "env", ",", "seed", "=", "None", ",", "nsteps", "=", "20", ",", "total_timesteps", "=", "int", "(", "80e6", ")", ",", "q_coef", "=", "0.5", ",", "ent_coef", "=", "0.01", ",", "max_grad_norm", "=", "10", ",", "lr...
3301089b48c42b87b396e246ea3f56fa4bfc9678
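A hedged usage sketch, not taken from the repository: the helpers make_vec_env and VecFrameStack and their signatures are assumptions about this baselines version, as is the Atari preprocessing they apply. In practice the equivalent setup is usually driven through python -m baselines.run --alg=acer --env=PongNoFrameskip-v4.

from baselines.acer import acer
from baselines.common.cmd_util import make_vec_env                  # assumed helper
from baselines.common.vec_env.vec_frame_stack import VecFrameStack  # frame stacking of 4 for Atari

venv = VecFrameStack(make_vec_env('PongNoFrameskip-v4', 'atari', num_env=4, seed=0), 4)
model = acer.learn(network='cnn', env=venv, total_timesteps=int(1e6), replay_ratio=4)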
valid
KfacOptimizer.apply_stats
compute stats and update/apply the new stats to the running average
baselines/acktr/kfac.py
def apply_stats(self, statsUpdates): """ compute stats and update/apply the new stats to the running average """ def updateAccumStats(): if self._full_stats_init: return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op) else: return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)) def updateRunningAvgStats(statsUpdates, fac_iter=1): # return tf.cond(tf.greater_equal(self.factor_step, # tf.convert_to_tensor(fac_iter)), lambda: # tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op) return tf.group(*self._apply_stats(statsUpdates)) if self._async_stats: # asynchronous stats update update_stats = self._apply_stats(statsUpdates) queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[ item.get_shape() for item in update_stats]) enqueue_op = queue.enqueue(update_stats) def dequeue_stats_op(): return queue.dequeue() self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op]) update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor( 0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ])) else: # synchronous stats update update_stats_op = tf.cond(tf.greater_equal( self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats) self._update_stats_op = update_stats_op return update_stats_op
def apply_stats(self, statsUpdates): """ compute stats and update/apply the new stats to the running average """ def updateAccumStats(): if self._full_stats_init: return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op) else: return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)) def updateRunningAvgStats(statsUpdates, fac_iter=1): # return tf.cond(tf.greater_equal(self.factor_step, # tf.convert_to_tensor(fac_iter)), lambda: # tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op) return tf.group(*self._apply_stats(statsUpdates)) if self._async_stats: # asynchronous stats update update_stats = self._apply_stats(statsUpdates) queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[ item.get_shape() for item in update_stats]) enqueue_op = queue.enqueue(update_stats) def dequeue_stats_op(): return queue.dequeue() self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op]) update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor( 0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ])) else: # synchronous stats update update_stats_op = tf.cond(tf.greater_equal( self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats) self._update_stats_op = update_stats_op return update_stats_op
[ "compute", "stats", "and", "update", "/", "apply", "the", "new", "stats", "to", "the", "running", "average" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/acktr/kfac.py#L440-L474
[ "def", "apply_stats", "(", "self", ",", "statsUpdates", ")", ":", "def", "updateAccumStats", "(", ")", ":", "if", "self", ".", "_full_stats_init", ":", "return", "tf", ".", "cond", "(", "tf", ".", "greater", "(", "self", ".", "sgd_step", ",", "self", "...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
tile_images
Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3
baselines/common/tile_images.py
def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3 """ img_nhwc = np.asarray(img_nhwc) N, h, w, c = img_nhwc.shape H = int(np.ceil(np.sqrt(N))) W = int(np.ceil(float(N)/H)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)]) img_HWhwc = img_nhwc.reshape(H, W, h, w, c) img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4) img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c) return img_Hh_Ww_c
def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3 """ img_nhwc = np.asarray(img_nhwc) N, h, w, c = img_nhwc.shape H = int(np.ceil(np.sqrt(N))) W = int(np.ceil(float(N)/H)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)]) img_HWhwc = img_nhwc.reshape(H, W, h, w, c) img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4) img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c) return img_Hh_Ww_c
[ "Tile", "N", "images", "into", "one", "big", "PxQ", "image", "(", "P", "Q", ")", "are", "chosen", "to", "be", "as", "close", "as", "possible", "and", "if", "N", "is", "square", "then", "P", "=", "Q", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tile_images.py#L3-L22
[ "def", "tile_images", "(", "img_nhwc", ")", ":", "img_nhwc", "=", "np", ".", "asarray", "(", "img_nhwc", ")", "N", ",", "h", ",", "w", ",", "c", "=", "img_nhwc", ".", "shape", "H", "=", "int", "(", "np", ".", "ceil", "(", "np", ".", "sqrt", "("...
3301089b48c42b87b396e246ea3f56fa4bfc9678
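A quick check of tile_images: 10 images of shape (16, 16, 3) are padded with blank tiles and arranged into a 4x3 grid. Only numpy and the import path from the record above are assumed.

import numpy as np
from baselines.common.tile_images import tile_images

imgs = np.random.randint(0, 256, size=(10, 16, 16, 3), dtype=np.uint8)
big = tile_images(imgs)
# N=10 gives H=ceil(sqrt(10))=4 rows and W=ceil(10/4)=3 columns of 16x16 tiles;
# the two missing tiles are filled with zeros.
print(big.shape)   # (64, 48, 3)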
valid
SumSegmentTree.sum
Returns arr[start] + ... + arr[end]
baselines/common/segment_tree.py
def sum(self, start=0, end=None): """Returns arr[start] + ... + arr[end]""" return super(SumSegmentTree, self).reduce(start, end)
def sum(self, start=0, end=None): """Returns arr[start] + ... + arr[end]""" return super(SumSegmentTree, self).reduce(start, end)
[ "Returns", "arr", "[", "start", "]", "+", "...", "+", "arr", "[", "end", "]" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/segment_tree.py#L101-L103
[ "def", "sum", "(", "self", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "super", "(", "SumSegmentTree", ",", "self", ")", ".", "reduce", "(", "start", ",", "end", ")" ]
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
SumSegmentTree.find_prefixsum_idx
Find the highest index `i` in the array such that arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum if array values are probabilities, this function allows sampling indexes according to the discrete probability efficiently. Parameters ---------- prefixsum: float upper bound on the sum of array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint
baselines/common/segment_tree.py
def find_prefixsum_idx(self, prefixsum): """Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum if array values are probabilities, this function allows to sample indexes according to the discrete probability efficiently. Parameters ---------- perfixsum: float upperbound on the sum of array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint """ assert 0 <= prefixsum <= self.sum() + 1e-5 idx = 1 while idx < self._capacity: # while non-leaf if self._value[2 * idx] > prefixsum: idx = 2 * idx else: prefixsum -= self._value[2 * idx] idx = 2 * idx + 1 return idx - self._capacity
def find_prefixsum_idx(self, prefixsum): """Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum if array values are probabilities, this function allows to sample indexes according to the discrete probability efficiently. Parameters ---------- perfixsum: float upperbound on the sum of array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint """ assert 0 <= prefixsum <= self.sum() + 1e-5 idx = 1 while idx < self._capacity: # while non-leaf if self._value[2 * idx] > prefixsum: idx = 2 * idx else: prefixsum -= self._value[2 * idx] idx = 2 * idx + 1 return idx - self._capacity
[ "Find", "the", "highest", "index", "i", "in", "the", "array", "such", "that", "sum", "(", "arr", "[", "0", "]", "+", "arr", "[", "1", "]", "+", "...", "+", "arr", "[", "i", "-", "i", "]", ")", "<", "=", "prefixsum" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/segment_tree.py#L105-L131
[ "def", "find_prefixsum_idx", "(", "self", ",", "prefixsum", ")", ":", "assert", "0", "<=", "prefixsum", "<=", "self", ".", "sum", "(", ")", "+", "1e-5", "idx", "=", "1", "while", "idx", "<", "self", ".", "_capacity", ":", "# while non-leaf", "if", "sel...
3301089b48c42b87b396e246ea3f56fa4bfc9678
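A hedged sketch of how sum and find_prefixsum_idx combine for proportional (prioritized) sampling, as in a prioritized replay buffer. The SumSegmentTree constructor and item assignment are assumed from the same baselines.common.segment_tree module; the capacity must be a power of two, as the tree requires.

import random
from baselines.common.segment_tree import SumSegmentTree

tree = SumSegmentTree(4)                  # capacity: power of two
priorities = [0.1, 0.4, 0.2, 0.3]
for i, p in enumerate(priorities):
    tree[i] = p                           # item assignment updates the internal tree in O(log n)

print(tree.sum())                         # 1.0, total priority mass
mass = random.random() * tree.sum()       # draw a point in [0, total)
idx = tree.find_prefixsum_idx(mass)       # index sampled with probability proportional to its priority
print(idx)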
valid
MinSegmentTree.min
Returns min(arr[start], ..., arr[end])
baselines/common/segment_tree.py
def min(self, start=0, end=None): """Returns min(arr[start], ..., arr[end])""" return super(MinSegmentTree, self).reduce(start, end)
def min(self, start=0, end=None): """Returns min(arr[start], ..., arr[end])""" return super(MinSegmentTree, self).reduce(start, end)
[ "Returns", "min", "(", "arr", "[", "start", "]", "...", "arr", "[", "end", "]", ")" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/segment_tree.py#L142-L145
[ "def", "min", "(", "self", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "super", "(", "MinSegmentTree", ",", "self", ")", ".", "reduce", "(", "start", ",", "end", ")" ]
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
PiecewiseSchedule.value
See Schedule.value
baselines/common/schedules.py
def value(self, t): """See Schedule.value""" for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]): if l_t <= t and t < r_t: alpha = float(t - l_t) / (r_t - l_t) return self._interpolation(l, r, alpha) # t does not belong to any of the pieces, so doom. assert self._outside_value is not None return self._outside_value
def value(self, t): """See Schedule.value""" for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]): if l_t <= t and t < r_t: alpha = float(t - l_t) / (r_t - l_t) return self._interpolation(l, r, alpha) # t does not belong to any of the pieces, so doom. assert self._outside_value is not None return self._outside_value
[ "See", "Schedule", ".", "value" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/schedules.py#L64-L73
[ "def", "value", "(", "self", ",", "t", ")", ":", "for", "(", "l_t", ",", "l", ")", ",", "(", "r_t", ",", "r", ")", "in", "zip", "(", "self", ".", "_endpoints", "[", ":", "-", "1", "]", ",", "self", ".", "_endpoints", "[", "1", ":", "]", "...
3301089b48c42b87b396e246ea3f56fa4bfc9678
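A hedged example of the schedule this method evaluates: an exploration or learning-rate schedule that interpolates linearly between (timestep, value) endpoints and returns a constant outside them. The constructor arguments (endpoints list plus outside_value) are assumed from baselines.common.schedules.

from baselines.common.schedules import PiecewiseSchedule

eps_schedule = PiecewiseSchedule(
    [(0, 1.0), (100000, 0.1), (500000, 0.02)],   # (timestep, value) endpoints, sorted by timestep
    outside_value=0.02,                          # returned for t beyond the defined pieces
)
print(eps_schedule.value(0))         # 1.0
print(eps_schedule.value(50000))     # 0.55, linear interpolation halfway between 1.0 and 0.1
print(eps_schedule.value(1000000))   # 0.02, outside the last endpoint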
valid
_subproc_worker
Control a single environment instance using IPC and shared memory.
baselines/common/vec_env/shmem_vec_env.py
def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys): """ Control a single environment instance using IPC and shared memory. """ def _write_obs(maybe_dict_obs): flatdict = obs_to_dict(maybe_dict_obs) for k in keys: dst = obs_bufs[k].get_obj() dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212 np.copyto(dst_np, flatdict[k]) env = env_fn_wrapper.x() parent_pipe.close() try: while True: cmd, data = pipe.recv() if cmd == 'reset': pipe.send(_write_obs(env.reset())) elif cmd == 'step': obs, reward, done, info = env.step(data) if done: obs = env.reset() pipe.send((_write_obs(obs), reward, done, info)) elif cmd == 'render': pipe.send(env.render(mode='rgb_array')) elif cmd == 'close': pipe.send(None) break else: raise RuntimeError('Got unrecognized cmd %s' % cmd) except KeyboardInterrupt: print('ShmemVecEnv worker: got KeyboardInterrupt') finally: env.close()
def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys): """ Control a single environment instance using IPC and shared memory. """ def _write_obs(maybe_dict_obs): flatdict = obs_to_dict(maybe_dict_obs) for k in keys: dst = obs_bufs[k].get_obj() dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212 np.copyto(dst_np, flatdict[k]) env = env_fn_wrapper.x() parent_pipe.close() try: while True: cmd, data = pipe.recv() if cmd == 'reset': pipe.send(_write_obs(env.reset())) elif cmd == 'step': obs, reward, done, info = env.step(data) if done: obs = env.reset() pipe.send((_write_obs(obs), reward, done, info)) elif cmd == 'render': pipe.send(env.render(mode='rgb_array')) elif cmd == 'close': pipe.send(None) break else: raise RuntimeError('Got unrecognized cmd %s' % cmd) except KeyboardInterrupt: print('ShmemVecEnv worker: got KeyboardInterrupt') finally: env.close()
[ "Control", "a", "single", "environment", "instance", "using", "IPC", "and", "shared", "memory", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/shmem_vec_env.py#L105-L139
[ "def", "_subproc_worker", "(", "pipe", ",", "parent_pipe", ",", "env_fn_wrapper", ",", "obs_bufs", ",", "obs_shapes", ",", "obs_dtypes", ",", "keys", ")", ":", "def", "_write_obs", "(", "maybe_dict_obs", ")", ":", "flatdict", "=", "obs_to_dict", "(", "maybe_di...
3301089b48c42b87b396e246ea3f56fa4bfc9678
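The worker is driven by a simple (cmd, data) protocol over a Pipe; observations are normally written into shared-memory buffers rather than sent back. The sketch below reproduces only the command protocol, with a hypothetical ToyEnv and plain pipe replies, so it is a simplified illustration rather than the ShmemVecEnv mechanism itself:

import multiprocessing as mp

class ToyEnv:
    """Hypothetical stand-in for a real gym environment."""
    def reset(self):
        return 0
    def step(self, action):
        return action, 1.0, False, {}

def worker(pipe, parent_pipe):
    env = ToyEnv()
    parent_pipe.close()               # child keeps only its own end, like _subproc_worker
    while True:
        cmd, data = pipe.recv()
        if cmd == 'reset':
            pipe.send(env.reset())
        elif cmd == 'step':
            pipe.send(env.step(data))
        elif cmd == 'close':
            pipe.send(None)
            break

if __name__ == '__main__':
    parent_end, child_end = mp.Pipe()
    proc = mp.Process(target=worker, args=(child_end, parent_end))
    proc.start()
    parent_end.send(('reset', None)); print(parent_end.recv())   # 0
    parent_end.send(('step', 3));     print(parent_end.recv())   # (3, 1.0, False, {})
    parent_end.send(('close', None)); parent_end.recv()
    proc.join()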
valid
learn
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make the random number sequence in the algorithm reproducible. By default it is None, which means the seed is taken from the system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01) max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network. For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
baselines/a2c/a2c.py
def learn( network, env, seed=None, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, load_path=None, **network_kwargs): ''' Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01) max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) # Get the nb of env nenvs = env.num_envs policy = build_policy(env, network, **network_kwargs) # Instantiate the model object (that creates step_model and train_model) model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) # Calculate the batch_size nbatch = nenvs*nsteps # Start total timer tstart = time.time() for update in range(1, total_timesteps//nbatch+1): # Get mini batch of experiences obs, states, rewards, masks, actions, values, epinfos = runner.run() epinfobuf.extend(epinfos) policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values) nseconds = time.time()-tstart # Calculate the fps (frame per second) fps = int((update*nbatch)/nseconds) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, rewards) logger.record_tabular("nupdates", update) logger.record_tabular("total_timesteps", update*nbatch) logger.record_tabular("fps", fps) logger.record_tabular("policy_entropy", float(policy_entropy)) logger.record_tabular("value_loss", float(value_loss)) logger.record_tabular("explained_variance", float(ev)) logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() return model
def learn( network, env, seed=None, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, load_path=None, **network_kwargs): ''' Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm. Parameters: ----------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See baselines.common/policies.py/lstm for more details on using recurrent nets in policies env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py) seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible) nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int, total number of timesteps to train on (default: 80M) vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5) ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01) max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5) lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4) lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and returns fraction of the learning rate (specified as lr) as output epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5) alpha: float, RMSProp decay parameter (default: 0.99) gamma: float, reward discounting parameter (default: 0.99) log_interval: int, specifies how frequently the logs are printed out (default: 100) **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) # Get the nb of env nenvs = env.num_envs policy = build_policy(env, network, **network_kwargs) # Instantiate the model object (that creates step_model and train_model) model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env, model, nsteps=nsteps, gamma=gamma) epinfobuf = deque(maxlen=100) # Calculate the batch_size nbatch = nenvs*nsteps # Start total timer tstart = time.time() for update in range(1, total_timesteps//nbatch+1): # Get mini batch of experiences obs, states, rewards, masks, actions, values, epinfos = runner.run() epinfobuf.extend(epinfos) policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values) nseconds = time.time()-tstart # Calculate the fps (frame per second) fps = int((update*nbatch)/nseconds) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, rewards) logger.record_tabular("nupdates", update) logger.record_tabular("total_timesteps", update*nbatch) logger.record_tabular("fps", fps) logger.record_tabular("policy_entropy", float(policy_entropy)) logger.record_tabular("value_loss", float(value_loss)) logger.record_tabular("explained_variance", float(ev)) logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf])) logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf])) logger.dump_tabular() return model
[ "Main", "entrypoint", "for", "A2C", "algorithm", ".", "Train", "a", "policy", "with", "given", "network", "architecture", "on", "a", "given", "environment", "using", "a2c", "algorithm", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/a2c/a2c.py#L119-L231
[ "def", "learn", "(", "network", ",", "env", ",", "seed", "=", "None", ",", "nsteps", "=", "5", ",", "total_timesteps", "=", "int", "(", "80e6", ")", ",", "vf_coef", "=", "0.5", ",", "ent_coef", "=", "0.01", ",", "max_grad_norm", "=", "0.5", ",", "l...
3301089b48c42b87b396e246ea3f56fa4bfc9678
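A minimal way to drive this entrypoint, sketched under the assumption that gym and the baselines DummyVecEnv wrapper are available; the hyperparameters are illustrative, and in practice the environments are usually wrapped in a Monitor (e.g. via make_vec_env) so that eprewmean/eplenmean have episode data to report:

import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.a2c.a2c import learn

# A2C expects a vectorized environment; DummyVecEnv wraps a list of env thunks.
env = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
model = learn(network='mlp', env=env, seed=0, nsteps=5,
              total_timesteps=int(1e5), log_interval=100)
env.close()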
valid
sf01
swap and then flatten axes 0 and 1
baselines/ppo2/runner.py
def sf01(arr): """ swap and then flatten axes 0 and 1 """ s = arr.shape return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def sf01(arr): """ swap and then flatten axes 0 and 1 """ s = arr.shape return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
[ "swap", "and", "then", "flatten", "axes", "0", "and", "1" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo2/runner.py#L69-L74
[ "def", "sf01", "(", "arr", ")", ":", "s", "=", "arr", ".", "shape", "return", "arr", ".", "swapaxes", "(", "0", ",", "1", ")", ".", "reshape", "(", "s", "[", "0", "]", "*", "s", "[", "1", "]", ",", "*", "s", "[", "2", ":", "]", ")" ]
3301089b48c42b87b396e246ea3f56fa4bfc9678
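In the PPO2 runner this turns batches shaped (nsteps, nenv, ...) into (nsteps * nenv, ...) with all steps of one environment kept contiguous. A quick numpy illustration (array contents chosen only to make the reordering visible):

import numpy as np

arr = np.arange(24).reshape(3, 4, 2)         # e.g. (nsteps=3, nenv=4, obs_dim=2)
flat = arr.swapaxes(0, 1).reshape(3 * 4, 2)  # what sf01 computes
print(flat.shape)                            # (12, 2)
print(flat[:3, 0])                           # [ 0  8 16] -> env 0 at steps 0, 1, 2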
valid
PolicyWithValue.step
Compute next action(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
baselines/common/policies.py
def step(self, observation, **extra_feed): """ Compute next action(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple """ a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed) if state.size == 0: state = None return a, v, state, neglogp
def step(self, observation, **extra_feed): """ Compute next action(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple """ a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed) if state.size == 0: state = None return a, v, state, neglogp
[ "Compute", "next", "action", "(", "s", ")", "given", "the", "observation", "(", "s", ")" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/policies.py#L77-L96
[ "def", "step", "(", "self", ",", "observation", ",", "*", "*", "extra_feed", ")", ":", "a", ",", "v", ",", "state", ",", "neglogp", "=", "self", ".", "_evaluate", "(", "[", "self", ".", "action", ",", "self", ".", "vf", ",", "self", ".", "state",...
3301089b48c42b87b396e246ea3f56fa4bfc9678
valid
PolicyWithValue.value
Compute value estimate(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- value estimate
baselines/common/policies.py
def value(self, ob, *args, **kwargs): """ Compute value estimate(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- value estimate """ return self._evaluate(self.vf, ob, *args, **kwargs)
def value(self, ob, *args, **kwargs): """ Compute value estimate(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- value estimate """ return self._evaluate(self.vf, ob, *args, **kwargs)
[ "Compute", "value", "estimate", "(", "s", ")", "given", "the", "observation", "(", "s", ")" ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/policies.py#L98-L113
[ "def", "value", "(", "self", ",", "ob", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_evaluate", "(", "self", ".", "vf", ",", "ob", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
3301089b48c42b87b396e246ea3f56fa4bfc9678
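step and value above are normally used together in a rollout loop: step drives the environment interaction and value provides the bootstrap estimate for the last observation. A sketch of that pattern, assuming `policy` is an already-constructed PolicyWithValue (e.g. built via baselines.common.policies.build_policy) and `env` is a vectorized environment:

# sketch only: `policy` and `env` are assumed to exist (see lead-in)
obs = env.reset()
state = policy.initial_state              # None for feed-forward policies
dones = [False] * env.num_envs

for _ in range(5):                        # collect a few transitions
    if state is None:                     # feed-forward policy
        actions, values, state, neglogps = policy.step(obs)
    else:                                 # recurrent policy also needs state and done mask
        actions, values, state, neglogps = policy.step(obs, S=state, M=dones)
    obs, rewards, dones, infos = env.step(actions)

# bootstrap value estimate for the final observation
# (recurrent policies would pass S=state, M=dones here as well)
last_values = policy.value(obs)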
valid
pretty_eta
Print the number of seconds in human readable format. Examples: 2 days 2 hours and 37 minutes less than a minute Parameters --------- seconds_left: int Number of seconds to be converted to the ETA Returns ------- eta: str String representing the pretty ETA.
baselines/common/misc_util.py
def pretty_eta(seconds_left): """Print the number of seconds in human readable format. Examples: 2 days 2 hours and 37 minutes less than a minute Paramters --------- seconds_left: int Number of seconds to be converted to the ETA Returns ------- eta: str String representing the pretty ETA. """ minutes_left = seconds_left // 60 seconds_left %= 60 hours_left = minutes_left // 60 minutes_left %= 60 days_left = hours_left // 24 hours_left %= 24 def helper(cnt, name): return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else '')) if days_left > 0: msg = helper(days_left, 'day') if hours_left > 0: msg += ' and ' + helper(hours_left, 'hour') return msg if hours_left > 0: msg = helper(hours_left, 'hour') if minutes_left > 0: msg += ' and ' + helper(minutes_left, 'minute') return msg if minutes_left > 0: return helper(minutes_left, 'minute') return 'less than a minute'
def pretty_eta(seconds_left): """Print the number of seconds in human readable format. Examples: 2 days 2 hours and 37 minutes less than a minute Paramters --------- seconds_left: int Number of seconds to be converted to the ETA Returns ------- eta: str String representing the pretty ETA. """ minutes_left = seconds_left // 60 seconds_left %= 60 hours_left = minutes_left // 60 minutes_left %= 60 days_left = hours_left // 24 hours_left %= 24 def helper(cnt, name): return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else '')) if days_left > 0: msg = helper(days_left, 'day') if hours_left > 0: msg += ' and ' + helper(hours_left, 'hour') return msg if hours_left > 0: msg = helper(hours_left, 'hour') if minutes_left > 0: msg += ' and ' + helper(minutes_left, 'minute') return msg if minutes_left > 0: return helper(minutes_left, 'minute') return 'less than a minute'
[ "Print", "the", "number", "of", "seconds", "in", "human", "readable", "format", "." ]
openai/baselines
python
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L65-L104
[ "def", "pretty_eta", "(", "seconds_left", ")", ":", "minutes_left", "=", "seconds_left", "//", "60", "seconds_left", "%=", "60", "hours_left", "=", "minutes_left", "//", "60", "minutes_left", "%=", "60", "days_left", "=", "hours_left", "//", "24", "hours_left", ...
3301089b48c42b87b396e246ea3f56fa4bfc9678
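A few illustrative calls, with outputs worked out from the integer arithmetic above (import path taken from the record's file, baselines/common/misc_util.py):

from baselines.common.misc_util import pretty_eta

print(pretty_eta(42))                     # less than a minute
print(pretty_eta(190))                    # 3 minutes
print(pretty_eta(2 * 3600 + 37 * 60))     # 2 hours and 37 minutes
print(pretty_eta(26 * 3600))              # 1 day and 2 hours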