Dataset columns:

Column            Type           Values
id                int32          0 to 252k
repo              stringlengths  7 to 55
path              stringlengths  4 to 127
func_name         stringlengths  1 to 88
original_string   stringlengths  75 to 19.8k
language          stringclasses  1 value
code              stringlengths  75 to 19.8k
code_tokens       list
docstring         stringlengths  3 to 17.3k
docstring_tokens  list
sha               stringlengths  40 to 40
url               stringlengths  87 to 242
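Rows in this shape are straightforward to process. The sketch below iterates over such records, assuming they have been exported as a JSON Lines file with one object per row using the column names above; the file name codesearch_python.jsonl and the helper iter_rows are illustrative assumptions, not part of the dataset.

import json

def iter_rows(path):
    # Yield one dict per dataset row from a JSON Lines export.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            yield json.loads(line)

for row in iter_rows("codesearch_python.jsonl"):  # hypothetical export file
    # Each row pairs a function's source with its docstring and token lists.
    assert len(row["sha"]) == 40                  # sha is a fixed 40-character commit hash
    print(row["repo"], row["path"], row["func_name"])
    first_doc_line = row["docstring"].splitlines()[0] if row["docstring"] else ""
    print(" ", first_doc_line)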
245,200
deifyed/vault
libconman/target.py
Target.deploy
def deploy(self): ''' Creates a link at the original path of this target ''' if not os.path.exists(self.path): makedirs(self.path) link(self.vault_path, self.real_path)
python
def deploy(self): ''' Creates a link at the original path of this target ''' if not os.path.exists(self.path): makedirs(self.path) link(self.vault_path, self.real_path)
[ "def", "deploy", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", ":", "makedirs", "(", "self", ".", "path", ")", "link", "(", "self", ".", "vault_path", ",", "self", ".", "real_path", ")" ]
Creates a link at the original path of this target
[ "Creates", "a", "link", "at", "the", "original", "path", "of", "this", "target" ]
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/target.py#L83-L90
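Each url value in the rows below follows the same pattern as the one above: a GitHub blob link that encodes the repo, the commit sha, the file path, and the source line range of the function. A small sketch for pulling those pieces back out; the helper name and regular expression are assumptions for illustration, not part of the dataset.

import re

BLOB_URL = re.compile(
    r"https://github\.com/(?P<repo>[^/]+/[^/]+)/blob/"
    r"(?P<sha>[0-9a-f]{40})/(?P<path>[^#]+)#L(?P<start>\d+)-L(?P<end>\d+)"
)

def parse_blob_url(url):
    # Return repo, sha, path and the 1-based line range encoded in the url column.
    match = BLOB_URL.match(url)
    if match is None:
        return None
    parts = match.groupdict()
    parts["start"], parts["end"] = int(parts["start"]), int(parts["end"])
    return parts

# For the first row's url this yields repo 'deifyed/vault', path 'libconman/target.py',
# and the line range 83 to 90.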
245,201
riccardocagnasso/useless
src/useless/common/__init__.py
parse_cstring
def parse_cstring(stream, offset): """ parse_cstring will parse a null-terminated string in a bytestream. The string will be decoded with UTF-8 decoder, of course since we are doing this byte-a-byte, it won't really work for all Unicode strings. TODO: add proper Unicode support """ stream.seek(offset) string = "" while True: char = struct.unpack('c', stream.read(1))[0] if char == b'\x00': return string else: string += char.decode()
python
def parse_cstring(stream, offset): """ parse_cstring will parse a null-terminated string in a bytestream. The string will be decoded with UTF-8 decoder, of course since we are doing this byte-a-byte, it won't really work for all Unicode strings. TODO: add proper Unicode support """ stream.seek(offset) string = "" while True: char = struct.unpack('c', stream.read(1))[0] if char == b'\x00': return string else: string += char.decode()
[ "def", "parse_cstring", "(", "stream", ",", "offset", ")", ":", "stream", ".", "seek", "(", "offset", ")", "string", "=", "\"\"", "while", "True", ":", "char", "=", "struct", ".", "unpack", "(", "'c'", ",", "stream", ".", "read", "(", "1", ")", ")", "[", "0", "]", "if", "char", "==", "b'\\x00'", ":", "return", "string", "else", ":", "string", "+=", "char", ".", "decode", "(", ")" ]
parse_cstring will parse a null-terminated string in a bytestream. The string will be decoded with UTF-8 decoder, of course since we are doing this byte-a-byte, it won't really work for all Unicode strings. TODO: add proper Unicode support
[ "parse_cstring", "will", "parse", "a", "null", "-", "terminated", "string", "in", "a", "bytestream", "." ]
5167aab82958f653148e3689c9a7e548d4fa2cba
https://github.com/riccardocagnasso/useless/blob/5167aab82958f653148e3689c9a7e548d4fa2cba/src/useless/common/__init__.py#L29-L48
245,202
lambdalisue/maidenhair
src/maidenhair/classification/unite.py
unite_dataset
def unite_dataset(dataset, basecolumn, fn=None): """ Unite dataset via fn Parameters ---------- dataset : list A list of data basecolumn : int A number of column which will be respected in uniting dataset fn : function A function which recieve :attr:`data` and return classification string. It if is None, a function which return the first item of the :attr:`data` will be used (See ``with_filename`` parameter of :func:`maidenhair.load` function). Returns ------- list A united dataset """ # create default unite_fn if fn is None: fn = default_unite_function # classify dataset via unite_fn united_dataset = OrderedDict() for data in dataset: unite_name = fn(data) if unite_name not in united_dataset: united_dataset[unite_name] = [] united_dataset[unite_name].append(data[1:]) # unite dataset via maidenhair.loaders.base.unite_dataset for name, dataset in united_dataset.items(): united_dataset[name] = _unite_dataset(dataset, basecolumn)[0] # create new dataset (respect the order of the dataset) dataset = [] for name, _dataset in united_dataset.items(): dataset.append([name] + _dataset) return dataset
python
def unite_dataset(dataset, basecolumn, fn=None): """ Unite dataset via fn Parameters ---------- dataset : list A list of data basecolumn : int A number of column which will be respected in uniting dataset fn : function A function which recieve :attr:`data` and return classification string. It if is None, a function which return the first item of the :attr:`data` will be used (See ``with_filename`` parameter of :func:`maidenhair.load` function). Returns ------- list A united dataset """ # create default unite_fn if fn is None: fn = default_unite_function # classify dataset via unite_fn united_dataset = OrderedDict() for data in dataset: unite_name = fn(data) if unite_name not in united_dataset: united_dataset[unite_name] = [] united_dataset[unite_name].append(data[1:]) # unite dataset via maidenhair.loaders.base.unite_dataset for name, dataset in united_dataset.items(): united_dataset[name] = _unite_dataset(dataset, basecolumn)[0] # create new dataset (respect the order of the dataset) dataset = [] for name, _dataset in united_dataset.items(): dataset.append([name] + _dataset) return dataset
[ "def", "unite_dataset", "(", "dataset", ",", "basecolumn", ",", "fn", "=", "None", ")", ":", "# create default unite_fn", "if", "fn", "is", "None", ":", "fn", "=", "default_unite_function", "# classify dataset via unite_fn", "united_dataset", "=", "OrderedDict", "(", ")", "for", "data", "in", "dataset", ":", "unite_name", "=", "fn", "(", "data", ")", "if", "unite_name", "not", "in", "united_dataset", ":", "united_dataset", "[", "unite_name", "]", "=", "[", "]", "united_dataset", "[", "unite_name", "]", ".", "append", "(", "data", "[", "1", ":", "]", ")", "# unite dataset via maidenhair.loaders.base.unite_dataset", "for", "name", ",", "dataset", "in", "united_dataset", ".", "items", "(", ")", ":", "united_dataset", "[", "name", "]", "=", "_unite_dataset", "(", "dataset", ",", "basecolumn", ")", "[", "0", "]", "# create new dataset (respect the order of the dataset)", "dataset", "=", "[", "]", "for", "name", ",", "_dataset", "in", "united_dataset", ".", "items", "(", ")", ":", "dataset", ".", "append", "(", "[", "name", "]", "+", "_dataset", ")", "return", "dataset" ]
Unite dataset via fn Parameters ---------- dataset : list A list of data basecolumn : int A number of column which will be respected in uniting dataset fn : function A function which recieve :attr:`data` and return classification string. It if is None, a function which return the first item of the :attr:`data` will be used (See ``with_filename`` parameter of :func:`maidenhair.load` function). Returns ------- list A united dataset
[ "Unite", "dataset", "via", "fn" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/classification/unite.py#L32-L70
245,203
collectiveacuity/labPack
labpack/speech/watson.py
watsonSpeechClient._create_folder
def _create_folder(self): ''' a helper method for creating a temporary audio clip folder ''' # import dependencies import os from labpack.platforms.localhost import localhostClient from labpack.records.id import labID # create folder in user app data record_id = labID() collection_name = 'Watson Speech2Text' localhost_client = localhostClient() app_folder = localhost_client.app_data(org_name=__team__, prod_name=__module__) if localhost_client.os in ('Linux', 'FreeBSD', 'Solaris'): collection_name = collection_name.replace(' ', '-').lower() collection_folder = os.path.join(app_folder, collection_name) clip_folder = os.path.join(collection_folder, record_id.id24) if not os.path.exists(clip_folder): os.makedirs(clip_folder) return clip_folder
python
def _create_folder(self): ''' a helper method for creating a temporary audio clip folder ''' # import dependencies import os from labpack.platforms.localhost import localhostClient from labpack.records.id import labID # create folder in user app data record_id = labID() collection_name = 'Watson Speech2Text' localhost_client = localhostClient() app_folder = localhost_client.app_data(org_name=__team__, prod_name=__module__) if localhost_client.os in ('Linux', 'FreeBSD', 'Solaris'): collection_name = collection_name.replace(' ', '-').lower() collection_folder = os.path.join(app_folder, collection_name) clip_folder = os.path.join(collection_folder, record_id.id24) if not os.path.exists(clip_folder): os.makedirs(clip_folder) return clip_folder
[ "def", "_create_folder", "(", "self", ")", ":", "# import dependencies\r", "import", "os", "from", "labpack", ".", "platforms", ".", "localhost", "import", "localhostClient", "from", "labpack", ".", "records", ".", "id", "import", "labID", "# create folder in user app data\r", "record_id", "=", "labID", "(", ")", "collection_name", "=", "'Watson Speech2Text'", "localhost_client", "=", "localhostClient", "(", ")", "app_folder", "=", "localhost_client", ".", "app_data", "(", "org_name", "=", "__team__", ",", "prod_name", "=", "__module__", ")", "if", "localhost_client", ".", "os", "in", "(", "'Linux'", ",", "'FreeBSD'", ",", "'Solaris'", ")", ":", "collection_name", "=", "collection_name", ".", "replace", "(", "' '", ",", "'-'", ")", ".", "lower", "(", ")", "collection_folder", "=", "os", ".", "path", ".", "join", "(", "app_folder", ",", "collection_name", ")", "clip_folder", "=", "os", ".", "path", ".", "join", "(", "collection_folder", ",", "record_id", ".", "id24", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "clip_folder", ")", ":", "os", ".", "makedirs", "(", "clip_folder", ")", "return", "clip_folder" ]
a helper method for creating a temporary audio clip folder
[ "a", "helper", "method", "for", "creating", "a", "temporary", "audio", "clip", "folder" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/speech/watson.py#L151-L172
245,204
collectiveacuity/labPack
labpack/speech/watson.py
watsonSpeechClient._transcribe_files
def _transcribe_files(self, file_list, file_mimetype): ''' a helper method for multi-processing file transcription ''' # import dependencies import queue from threading import Thread # define multithreading function def _recognize_file(file_path, file_mimetype, queue): details = { 'error': '', 'results': [] } file_data = open(file_path, 'rb') try: transcript = self.client.recognize(file_data, file_mimetype, continuous=True) if transcript['results']: for result in transcript['results']: details['results'].append(result['alternatives'][0]) except Exception as err: details['error'] = err queue.put(details) # construct queue q = queue.Queue() # send clips to watson for transcription in separate threads threads = [] for file in file_list: watson_job = Thread(target=_recognize_file, args=(file, file_mimetype, q)) watson_job.start() threads.append(watson_job) # wait for threads to end for t in threads: t.join() # construct result transcript_details = { 'error': '', 'segments': [] } for i in range(len(file_list)): job_details = q.get() if job_details['error']: transcript_details['error'] = job_details['error'] transcript_details['segments'].extend(job_details['results']) return transcript_details
python
def _transcribe_files(self, file_list, file_mimetype): ''' a helper method for multi-processing file transcription ''' # import dependencies import queue from threading import Thread # define multithreading function def _recognize_file(file_path, file_mimetype, queue): details = { 'error': '', 'results': [] } file_data = open(file_path, 'rb') try: transcript = self.client.recognize(file_data, file_mimetype, continuous=True) if transcript['results']: for result in transcript['results']: details['results'].append(result['alternatives'][0]) except Exception as err: details['error'] = err queue.put(details) # construct queue q = queue.Queue() # send clips to watson for transcription in separate threads threads = [] for file in file_list: watson_job = Thread(target=_recognize_file, args=(file, file_mimetype, q)) watson_job.start() threads.append(watson_job) # wait for threads to end for t in threads: t.join() # construct result transcript_details = { 'error': '', 'segments': [] } for i in range(len(file_list)): job_details = q.get() if job_details['error']: transcript_details['error'] = job_details['error'] transcript_details['segments'].extend(job_details['results']) return transcript_details
[ "def", "_transcribe_files", "(", "self", ",", "file_list", ",", "file_mimetype", ")", ":", "# import dependencies\r", "import", "queue", "from", "threading", "import", "Thread", "# define multithreading function\r", "def", "_recognize_file", "(", "file_path", ",", "file_mimetype", ",", "queue", ")", ":", "details", "=", "{", "'error'", ":", "''", ",", "'results'", ":", "[", "]", "}", "file_data", "=", "open", "(", "file_path", ",", "'rb'", ")", "try", ":", "transcript", "=", "self", ".", "client", ".", "recognize", "(", "file_data", ",", "file_mimetype", ",", "continuous", "=", "True", ")", "if", "transcript", "[", "'results'", "]", ":", "for", "result", "in", "transcript", "[", "'results'", "]", ":", "details", "[", "'results'", "]", ".", "append", "(", "result", "[", "'alternatives'", "]", "[", "0", "]", ")", "except", "Exception", "as", "err", ":", "details", "[", "'error'", "]", "=", "err", "queue", ".", "put", "(", "details", ")", "# construct queue\r", "q", "=", "queue", ".", "Queue", "(", ")", "# send clips to watson for transcription in separate threads\r", "threads", "=", "[", "]", "for", "file", "in", "file_list", ":", "watson_job", "=", "Thread", "(", "target", "=", "_recognize_file", ",", "args", "=", "(", "file", ",", "file_mimetype", ",", "q", ")", ")", "watson_job", ".", "start", "(", ")", "threads", ".", "append", "(", "watson_job", ")", "# wait for threads to end\r", "for", "t", "in", "threads", ":", "t", ".", "join", "(", ")", "# construct result\r", "transcript_details", "=", "{", "'error'", ":", "''", ",", "'segments'", ":", "[", "]", "}", "for", "i", "in", "range", "(", "len", "(", "file_list", ")", ")", ":", "job_details", "=", "q", ".", "get", "(", ")", "if", "job_details", "[", "'error'", "]", ":", "transcript_details", "[", "'error'", "]", "=", "job_details", "[", "'error'", "]", "transcript_details", "[", "'segments'", "]", ".", "extend", "(", "job_details", "[", "'results'", "]", ")", "return", "transcript_details" ]
a helper method for multi-processing file transcription
[ "a", "helper", "method", "for", "multi", "-", "processing", "file", "transcription" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/speech/watson.py#L174-L226
245,205
mbodenhamer/syn
syn/sets/b/base.py
SetNode.lazy_enumerate
def lazy_enumerate(self, **kwargs): '''Enumerate without evaluating any sets. ''' kwargs['lazy'] = True for item in self.enumerate(**kwargs): yield item
python
def lazy_enumerate(self, **kwargs): '''Enumerate without evaluating any sets. ''' kwargs['lazy'] = True for item in self.enumerate(**kwargs): yield item
[ "def", "lazy_enumerate", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'lazy'", "]", "=", "True", "for", "item", "in", "self", ".", "enumerate", "(", "*", "*", "kwargs", ")", ":", "yield", "item" ]
Enumerate without evaluating any sets.
[ "Enumerate", "without", "evaluating", "any", "sets", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/sets/b/base.py#L95-L100
245,206
theirc/rapidsms-multitenancy
multitenancy/views.py
group_selection
def group_selection(request): """Allow user to select a TenantGroup if they have more than one.""" groups = get_user_groups(request.user) count = len(groups) if count == 1: # Redirect to the detail page for this group return redirect(groups[0]) context = { 'groups': groups, 'count': count, } return render(request, 'multitenancy/group-landing.html', context)
python
def group_selection(request): """Allow user to select a TenantGroup if they have more than one.""" groups = get_user_groups(request.user) count = len(groups) if count == 1: # Redirect to the detail page for this group return redirect(groups[0]) context = { 'groups': groups, 'count': count, } return render(request, 'multitenancy/group-landing.html', context)
[ "def", "group_selection", "(", "request", ")", ":", "groups", "=", "get_user_groups", "(", "request", ".", "user", ")", "count", "=", "len", "(", "groups", ")", "if", "count", "==", "1", ":", "# Redirect to the detail page for this group", "return", "redirect", "(", "groups", "[", "0", "]", ")", "context", "=", "{", "'groups'", ":", "groups", ",", "'count'", ":", "count", ",", "}", "return", "render", "(", "request", ",", "'multitenancy/group-landing.html'", ",", "context", ")" ]
Allow user to select a TenantGroup if they have more than one.
[ "Allow", "user", "to", "select", "a", "TenantGroup", "if", "they", "have", "more", "than", "one", "." ]
121bd0a628e691a88aade2e10045cba43af2dfcb
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/views.py#L8-L20
245,207
theirc/rapidsms-multitenancy
multitenancy/views.py
group_dashboard
def group_dashboard(request, group_slug): """Dashboard for managing a TenantGroup.""" groups = get_user_groups(request.user) group = get_object_or_404(groups, slug=group_slug) tenants = get_user_tenants(request.user, group) can_edit_group = request.user.has_perm('multitenancy.change_tenantgroup', group) count = len(tenants) if count == 1: # Redirect to the detail page for this tenant return redirect(tenants[0]) context = { 'group': group, 'tenants': tenants, 'count': count, 'can_edit_group': can_edit_group, } return render(request, 'multitenancy/group-detail.html', context)
python
def group_dashboard(request, group_slug): """Dashboard for managing a TenantGroup.""" groups = get_user_groups(request.user) group = get_object_or_404(groups, slug=group_slug) tenants = get_user_tenants(request.user, group) can_edit_group = request.user.has_perm('multitenancy.change_tenantgroup', group) count = len(tenants) if count == 1: # Redirect to the detail page for this tenant return redirect(tenants[0]) context = { 'group': group, 'tenants': tenants, 'count': count, 'can_edit_group': can_edit_group, } return render(request, 'multitenancy/group-detail.html', context)
[ "def", "group_dashboard", "(", "request", ",", "group_slug", ")", ":", "groups", "=", "get_user_groups", "(", "request", ".", "user", ")", "group", "=", "get_object_or_404", "(", "groups", ",", "slug", "=", "group_slug", ")", "tenants", "=", "get_user_tenants", "(", "request", ".", "user", ",", "group", ")", "can_edit_group", "=", "request", ".", "user", ".", "has_perm", "(", "'multitenancy.change_tenantgroup'", ",", "group", ")", "count", "=", "len", "(", "tenants", ")", "if", "count", "==", "1", ":", "# Redirect to the detail page for this tenant", "return", "redirect", "(", "tenants", "[", "0", "]", ")", "context", "=", "{", "'group'", ":", "group", ",", "'tenants'", ":", "tenants", ",", "'count'", ":", "count", ",", "'can_edit_group'", ":", "can_edit_group", ",", "}", "return", "render", "(", "request", ",", "'multitenancy/group-detail.html'", ",", "context", ")" ]
Dashboard for managing a TenantGroup.
[ "Dashboard", "for", "managing", "a", "TenantGroup", "." ]
121bd0a628e691a88aade2e10045cba43af2dfcb
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/views.py#L24-L40
245,208
theirc/rapidsms-multitenancy
multitenancy/views.py
tenant_dashboard
def tenant_dashboard(request, group_slug, tenant_slug): """Dashboard for managing a tenant.""" groups = get_user_groups(request.user) group = get_object_or_404(groups, slug=group_slug) tenants = get_user_tenants(request.user, group) tenant = get_object_or_404(tenants, slug=tenant_slug) can_edit_tenant = request.user.has_perm('multitenancy.change_tenant', tenant) context = { 'group': group, 'tenant': tenant, 'can_edit_tenant': can_edit_tenant, } return render(request, 'multitenancy/tenant-detail.html', context)
python
def tenant_dashboard(request, group_slug, tenant_slug): """Dashboard for managing a tenant.""" groups = get_user_groups(request.user) group = get_object_or_404(groups, slug=group_slug) tenants = get_user_tenants(request.user, group) tenant = get_object_or_404(tenants, slug=tenant_slug) can_edit_tenant = request.user.has_perm('multitenancy.change_tenant', tenant) context = { 'group': group, 'tenant': tenant, 'can_edit_tenant': can_edit_tenant, } return render(request, 'multitenancy/tenant-detail.html', context)
[ "def", "tenant_dashboard", "(", "request", ",", "group_slug", ",", "tenant_slug", ")", ":", "groups", "=", "get_user_groups", "(", "request", ".", "user", ")", "group", "=", "get_object_or_404", "(", "groups", ",", "slug", "=", "group_slug", ")", "tenants", "=", "get_user_tenants", "(", "request", ".", "user", ",", "group", ")", "tenant", "=", "get_object_or_404", "(", "tenants", ",", "slug", "=", "tenant_slug", ")", "can_edit_tenant", "=", "request", ".", "user", ".", "has_perm", "(", "'multitenancy.change_tenant'", ",", "tenant", ")", "context", "=", "{", "'group'", ":", "group", ",", "'tenant'", ":", "tenant", ",", "'can_edit_tenant'", ":", "can_edit_tenant", ",", "}", "return", "render", "(", "request", ",", "'multitenancy/tenant-detail.html'", ",", "context", ")" ]
Dashboard for managing a tenant.
[ "Dashboard", "for", "managing", "a", "tenant", "." ]
121bd0a628e691a88aade2e10045cba43af2dfcb
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/views.py#L44-L56
245,209
pip-services3-python/pip-services3-components-python
pip_services3_components/connect/MemoryDiscovery.py
MemoryDiscovery.read_connections
def read_connections(self, connections): """ Reads connections from configuration parameters. Each section represents an individual Connectionparams :param connections: configuration parameters to be read """ del self._items[:] for key in connections.get_key_names(): item = DiscoveryItem() item.key = key value = connections.get_as_nullable_string(key) item.connection = ConnectionParams.from_string(value) self._items.append(item)
python
def read_connections(self, connections): """ Reads connections from configuration parameters. Each section represents an individual Connectionparams :param connections: configuration parameters to be read """ del self._items[:] for key in connections.get_key_names(): item = DiscoveryItem() item.key = key value = connections.get_as_nullable_string(key) item.connection = ConnectionParams.from_string(value) self._items.append(item)
[ "def", "read_connections", "(", "self", ",", "connections", ")", ":", "del", "self", ".", "_items", "[", ":", "]", "for", "key", "in", "connections", ".", "get_key_names", "(", ")", ":", "item", "=", "DiscoveryItem", "(", ")", "item", ".", "key", "=", "key", "value", "=", "connections", ".", "get_as_nullable_string", "(", "key", ")", "item", ".", "connection", "=", "ConnectionParams", ".", "from_string", "(", "value", ")", "self", ".", "_items", ".", "append", "(", "item", ")" ]
Reads connections from configuration parameters. Each section represents an individual Connectionparams :param connections: configuration parameters to be read
[ "Reads", "connections", "from", "configuration", "parameters", ".", "Each", "section", "represents", "an", "individual", "Connectionparams" ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/connect/MemoryDiscovery.py#L67-L80
245,210
pip-services3-python/pip-services3-components-python
pip_services3_components/connect/MemoryDiscovery.py
MemoryDiscovery.register
def register(self, correlation_id, key, connection): """ Registers connection parameters into the discovery service. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection parameters. :param connection: a connection to be registered. """ item = DiscoveryItem() item.key = key item.connection = connection self._items.append(item)
python
def register(self, correlation_id, key, connection): """ Registers connection parameters into the discovery service. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection parameters. :param connection: a connection to be registered. """ item = DiscoveryItem() item.key = key item.connection = connection self._items.append(item)
[ "def", "register", "(", "self", ",", "correlation_id", ",", "key", ",", "connection", ")", ":", "item", "=", "DiscoveryItem", "(", ")", "item", ".", "key", "=", "key", "item", ".", "connection", "=", "connection", "self", ".", "_items", ".", "append", "(", "item", ")" ]
Registers connection parameters into the discovery service. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection parameters. :param connection: a connection to be registered.
[ "Registers", "connection", "parameters", "into", "the", "discovery", "service", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/connect/MemoryDiscovery.py#L82-L95
245,211
pip-services3-python/pip-services3-components-python
pip_services3_components/connect/MemoryDiscovery.py
MemoryDiscovery.resolve_one
def resolve_one(self, correlation_id, key): """ Resolves a single connection parameters by its key. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection. :return: a resolved connection. """ connection = None for item in self._items: if item.key == key and item.connection != None: connection = item.connection break return connection
python
def resolve_one(self, correlation_id, key): """ Resolves a single connection parameters by its key. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection. :return: a resolved connection. """ connection = None for item in self._items: if item.key == key and item.connection != None: connection = item.connection break return connection
[ "def", "resolve_one", "(", "self", ",", "correlation_id", ",", "key", ")", ":", "connection", "=", "None", "for", "item", "in", "self", ".", "_items", ":", "if", "item", ".", "key", "==", "key", "and", "item", ".", "connection", "!=", "None", ":", "connection", "=", "item", ".", "connection", "break", "return", "connection" ]
Resolves a single connection parameters by its key. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection. :return: a resolved connection.
[ "Resolves", "a", "single", "connection", "parameters", "by", "its", "key", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/connect/MemoryDiscovery.py#L97-L112
245,212
nefarioustim/parker
parker/stringops.py
generate_chunks
def generate_chunks(string, num_chars): """Yield num_chars-character chunks from string.""" for start in range(0, len(string), num_chars): yield string[start:start+num_chars]
python
def generate_chunks(string, num_chars): """Yield num_chars-character chunks from string.""" for start in range(0, len(string), num_chars): yield string[start:start+num_chars]
[ "def", "generate_chunks", "(", "string", ",", "num_chars", ")", ":", "for", "start", "in", "range", "(", "0", ",", "len", "(", "string", ")", ",", "num_chars", ")", ":", "yield", "string", "[", "start", ":", "start", "+", "num_chars", "]" ]
Yield num_chars-character chunks from string.
[ "Yield", "num_chars", "-", "character", "chunks", "from", "string", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/stringops.py#L5-L8
245,213
rosenbrockc/acorn
acorn/importer.py
_load_package_config
def _load_package_config(reload_=False): """Loads the package configurations from the global `acorn.cfg` file. """ global _packages from acorn.config import settings packset = settings("acorn", reload_) if packset.has_section("acorn.packages"): for package, value in packset.items("acorn.packages"): _packages[package] = value.strip() == "1"
python
def _load_package_config(reload_=False): """Loads the package configurations from the global `acorn.cfg` file. """ global _packages from acorn.config import settings packset = settings("acorn", reload_) if packset.has_section("acorn.packages"): for package, value in packset.items("acorn.packages"): _packages[package] = value.strip() == "1"
[ "def", "_load_package_config", "(", "reload_", "=", "False", ")", ":", "global", "_packages", "from", "acorn", ".", "config", "import", "settings", "packset", "=", "settings", "(", "\"acorn\"", ",", "reload_", ")", "if", "packset", ".", "has_section", "(", "\"acorn.packages\"", ")", ":", "for", "package", ",", "value", "in", "packset", ".", "items", "(", "\"acorn.packages\"", ")", ":", "_packages", "[", "package", "]", "=", "value", ".", "strip", "(", ")", "==", "\"1\"" ]
Loads the package configurations from the global `acorn.cfg` file.
[ "Loads", "the", "package", "configurations", "from", "the", "global", "acorn", ".", "cfg", "file", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/importer.py#L14-L22
245,214
rosenbrockc/acorn
acorn/importer.py
load_decorate
def load_decorate(package): """Imports and decorates the package with the specified name. """ # We import the decoration logic from acorn and then overwrite the sys.module # for this package with the decorated, original pandas package. from acorn.logging.decoration import set_decorating, decorating #Before we do any imports, we need to set that we are decorating so that #everything works as if `acorn` wasn't even here. origdecor = decorating set_decorating(True) #If we try and import the module directly, we will get stuck in a loop; at #some point we must invoke the built-in module loader from python. We do #this by removing our sys.path hook. import sys from importlib import import_module apack = import_module(package) from acorn.logging.decoration import decorate decorate(apack) sys.modules["acorn.{}".format(package)] = apack #Set the decoration back to what it was. from acorn.logging.decoration import set_decorating set_decorating(origdecor) return apack
python
def load_decorate(package): """Imports and decorates the package with the specified name. """ # We import the decoration logic from acorn and then overwrite the sys.module # for this package with the decorated, original pandas package. from acorn.logging.decoration import set_decorating, decorating #Before we do any imports, we need to set that we are decorating so that #everything works as if `acorn` wasn't even here. origdecor = decorating set_decorating(True) #If we try and import the module directly, we will get stuck in a loop; at #some point we must invoke the built-in module loader from python. We do #this by removing our sys.path hook. import sys from importlib import import_module apack = import_module(package) from acorn.logging.decoration import decorate decorate(apack) sys.modules["acorn.{}".format(package)] = apack #Set the decoration back to what it was. from acorn.logging.decoration import set_decorating set_decorating(origdecor) return apack
[ "def", "load_decorate", "(", "package", ")", ":", "# We import the decoration logic from acorn and then overwrite the sys.module", "# for this package with the decorated, original pandas package.", "from", "acorn", ".", "logging", ".", "decoration", "import", "set_decorating", ",", "decorating", "#Before we do any imports, we need to set that we are decorating so that", "#everything works as if `acorn` wasn't even here.", "origdecor", "=", "decorating", "set_decorating", "(", "True", ")", "#If we try and import the module directly, we will get stuck in a loop; at", "#some point we must invoke the built-in module loader from python. We do", "#this by removing our sys.path hook.", "import", "sys", "from", "importlib", "import", "import_module", "apack", "=", "import_module", "(", "package", ")", "from", "acorn", ".", "logging", ".", "decoration", "import", "decorate", "decorate", "(", "apack", ")", "sys", ".", "modules", "[", "\"acorn.{}\"", ".", "format", "(", "package", ")", "]", "=", "apack", "#Set the decoration back to what it was.", "from", "acorn", ".", "logging", ".", "decoration", "import", "set_decorating", "set_decorating", "(", "origdecor", ")", "return", "apack" ]
Imports and decorates the package with the specified name.
[ "Imports", "and", "decorates", "the", "package", "with", "the", "specified", "name", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/importer.py#L73-L100
245,215
RonenNess/Fileter
fileter/iterators/remove_files.py
RemoveFiles.process_file
def process_file(self, path, dryrun): """ Remove files and return filename. """ # if dryrun just return file path if dryrun: return path # remove and return file if self.__force or raw_input("Remove file '%s'? [y/N]" % path).lower() == "y": os.remove(path) return path
python
def process_file(self, path, dryrun): """ Remove files and return filename. """ # if dryrun just return file path if dryrun: return path # remove and return file if self.__force or raw_input("Remove file '%s'? [y/N]" % path).lower() == "y": os.remove(path) return path
[ "def", "process_file", "(", "self", ",", "path", ",", "dryrun", ")", ":", "# if dryrun just return file path", "if", "dryrun", ":", "return", "path", "# remove and return file", "if", "self", ".", "__force", "or", "raw_input", "(", "\"Remove file '%s'? [y/N]\"", "%", "path", ")", ".", "lower", "(", ")", "==", "\"y\"", ":", "os", ".", "remove", "(", "path", ")", "return", "path" ]
Remove files and return filename.
[ "Remove", "files", "and", "return", "filename", "." ]
5372221b4049d5d46a9926573b91af17681c81f3
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/remove_files.py#L27-L38
245,216
maxfischer2781/include
include/base/import_hook.py
BaseIncludeLoader.find_module
def find_module(self, fullname, path=None): """ Find the appropriate loader for module ``name`` :param fullname: ``__name__`` of the module to import :type fullname: str :param path: ``__path__`` of the *parent* package already imported :type path: str or None """ # path points to the top-level package path if any # and we can only import sub-modules/-packages if path is None: return if fullname.startswith(self.module_prefix): return self else: return None
python
def find_module(self, fullname, path=None): """ Find the appropriate loader for module ``name`` :param fullname: ``__name__`` of the module to import :type fullname: str :param path: ``__path__`` of the *parent* package already imported :type path: str or None """ # path points to the top-level package path if any # and we can only import sub-modules/-packages if path is None: return if fullname.startswith(self.module_prefix): return self else: return None
[ "def", "find_module", "(", "self", ",", "fullname", ",", "path", "=", "None", ")", ":", "# path points to the top-level package path if any", "# and we can only import sub-modules/-packages", "if", "path", "is", "None", ":", "return", "if", "fullname", ".", "startswith", "(", "self", ".", "module_prefix", ")", ":", "return", "self", "else", ":", "return", "None" ]
Find the appropriate loader for module ``name`` :param fullname: ``__name__`` of the module to import :type fullname: str :param path: ``__path__`` of the *parent* package already imported :type path: str or None
[ "Find", "the", "appropriate", "loader", "for", "module", "name" ]
d8b0404f4996b6abcd39fdebf282b31fad8bb6f5
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/base/import_hook.py#L69-L85
245,217
sprockets/sprockets
sprockets/cli.py
CLI.run
def run(self): """Evaluate the command line arguments, performing the appropriate actions so the application can be started. """ # The list command prevents any other processing of args if self._args.list: self._print_installed_apps(self._args.controller) sys.exit(0) # If app is not specified at this point, raise an error if not self._args.application: sys.stderr.write('\nerror: application not specified\n\n') self._arg_parser.print_help() sys.exit(-1) # If it's a registered app reference by name, get the module name app_module = self._get_application_module(self._args.controller, self._args.application) # Configure logging based upon the flags self._configure_logging(app_module, self._args.verbose, self._args.syslog) # Try and run the controller try: self._controllers[self._args.controller].main(app_module, self._args) except TypeError as error: sys.stderr.write('error: could not start the %s controller for %s' ': %s\n\n' % (self._args.controller, app_module, str(error))) sys.exit(-1)
python
def run(self): """Evaluate the command line arguments, performing the appropriate actions so the application can be started. """ # The list command prevents any other processing of args if self._args.list: self._print_installed_apps(self._args.controller) sys.exit(0) # If app is not specified at this point, raise an error if not self._args.application: sys.stderr.write('\nerror: application not specified\n\n') self._arg_parser.print_help() sys.exit(-1) # If it's a registered app reference by name, get the module name app_module = self._get_application_module(self._args.controller, self._args.application) # Configure logging based upon the flags self._configure_logging(app_module, self._args.verbose, self._args.syslog) # Try and run the controller try: self._controllers[self._args.controller].main(app_module, self._args) except TypeError as error: sys.stderr.write('error: could not start the %s controller for %s' ': %s\n\n' % (self._args.controller, app_module, str(error))) sys.exit(-1)
[ "def", "run", "(", "self", ")", ":", "# The list command prevents any other processing of args", "if", "self", ".", "_args", ".", "list", ":", "self", ".", "_print_installed_apps", "(", "self", ".", "_args", ".", "controller", ")", "sys", ".", "exit", "(", "0", ")", "# If app is not specified at this point, raise an error", "if", "not", "self", ".", "_args", ".", "application", ":", "sys", ".", "stderr", ".", "write", "(", "'\\nerror: application not specified\\n\\n'", ")", "self", ".", "_arg_parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "-", "1", ")", "# If it's a registered app reference by name, get the module name", "app_module", "=", "self", ".", "_get_application_module", "(", "self", ".", "_args", ".", "controller", ",", "self", ".", "_args", ".", "application", ")", "# Configure logging based upon the flags", "self", ".", "_configure_logging", "(", "app_module", ",", "self", ".", "_args", ".", "verbose", ",", "self", ".", "_args", ".", "syslog", ")", "# Try and run the controller", "try", ":", "self", ".", "_controllers", "[", "self", ".", "_args", ".", "controller", "]", ".", "main", "(", "app_module", ",", "self", ".", "_args", ")", "except", "TypeError", "as", "error", ":", "sys", ".", "stderr", ".", "write", "(", "'error: could not start the %s controller for %s'", "': %s\\n\\n'", "%", "(", "self", ".", "_args", ".", "controller", ",", "app_module", ",", "str", "(", "error", ")", ")", ")", "sys", ".", "exit", "(", "-", "1", ")" ]
Evaluate the command line arguments, performing the appropriate actions so the application can be started.
[ "Evaluate", "the", "command", "line", "arguments", "performing", "the", "appropriate", "actions", "so", "the", "application", "can", "be", "started", "." ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L85-L119
245,218
sprockets/sprockets
sprockets/cli.py
CLI._add_cli_args
def _add_cli_args(self): """Add the cli arguments to the argument parser.""" # Optional cli arguments self._arg_parser.add_argument('-l', '--list', action='store_true', help='List installed sprockets apps') self._arg_parser.add_argument('-s', '--syslog', action='store_true', help='Log to syslog') self._arg_parser.add_argument('-v', '--verbose', action='count', help=('Verbose logging output, use -vv ' 'for DEBUG level logging')) self._arg_parser.add_argument('--version', action='version', version='sprockets v%s ' % __version__) # Controller sub-parser subparsers = self._arg_parser.add_subparsers(dest='controller', help=DESCRIPTION) # Iterate through the controllers and add their cli arguments for key in self._controllers: help_text = self._get_controller_help(key) sub_parser = subparsers.add_parser(key, help=help_text) try: self._controllers[key].add_cli_arguments(sub_parser) except AttributeError: LOGGER.debug('%s missing add_cli_arguments()', key) # The application argument self._arg_parser.add_argument('application', action="store", help='The sprockets app to run')
python
def _add_cli_args(self): """Add the cli arguments to the argument parser.""" # Optional cli arguments self._arg_parser.add_argument('-l', '--list', action='store_true', help='List installed sprockets apps') self._arg_parser.add_argument('-s', '--syslog', action='store_true', help='Log to syslog') self._arg_parser.add_argument('-v', '--verbose', action='count', help=('Verbose logging output, use -vv ' 'for DEBUG level logging')) self._arg_parser.add_argument('--version', action='version', version='sprockets v%s ' % __version__) # Controller sub-parser subparsers = self._arg_parser.add_subparsers(dest='controller', help=DESCRIPTION) # Iterate through the controllers and add their cli arguments for key in self._controllers: help_text = self._get_controller_help(key) sub_parser = subparsers.add_parser(key, help=help_text) try: self._controllers[key].add_cli_arguments(sub_parser) except AttributeError: LOGGER.debug('%s missing add_cli_arguments()', key) # The application argument self._arg_parser.add_argument('application', action="store", help='The sprockets app to run')
[ "def", "_add_cli_args", "(", "self", ")", ":", "# Optional cli arguments", "self", ".", "_arg_parser", ".", "add_argument", "(", "'-l'", ",", "'--list'", ",", "action", "=", "'store_true'", ",", "help", "=", "'List installed sprockets apps'", ")", "self", ".", "_arg_parser", ".", "add_argument", "(", "'-s'", ",", "'--syslog'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Log to syslog'", ")", "self", ".", "_arg_parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "action", "=", "'count'", ",", "help", "=", "(", "'Verbose logging output, use -vv '", "'for DEBUG level logging'", ")", ")", "self", ".", "_arg_parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'sprockets v%s '", "%", "__version__", ")", "# Controller sub-parser", "subparsers", "=", "self", ".", "_arg_parser", ".", "add_subparsers", "(", "dest", "=", "'controller'", ",", "help", "=", "DESCRIPTION", ")", "# Iterate through the controllers and add their cli arguments", "for", "key", "in", "self", ".", "_controllers", ":", "help_text", "=", "self", ".", "_get_controller_help", "(", "key", ")", "sub_parser", "=", "subparsers", ".", "add_parser", "(", "key", ",", "help", "=", "help_text", ")", "try", ":", "self", ".", "_controllers", "[", "key", "]", ".", "add_cli_arguments", "(", "sub_parser", ")", "except", "AttributeError", ":", "LOGGER", ".", "debug", "(", "'%s missing add_cli_arguments()'", ",", "key", ")", "# The application argument", "self", ".", "_arg_parser", ".", "add_argument", "(", "'application'", ",", "action", "=", "\"store\"", ",", "help", "=", "'The sprockets app to run'", ")" ]
Add the cli arguments to the argument parser.
[ "Add", "the", "cli", "arguments", "to", "the", "argument", "parser", "." ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L121-L158
245,219
sprockets/sprockets
sprockets/cli.py
CLI._configure_logging
def _configure_logging(application, verbosity=0, syslog=False): """Configure logging for the application, setting the appropriate verbosity and adding syslog if it's enabled. :param str application: The application module/package name :param int verbosity: 1 == INFO, 2 == DEBUG :param bool syslog: Enable the syslog handler """ # Create a new copy of the logging config that will be modified config = dict(LOGGING) # Increase the logging verbosity if verbosity == 1: config['loggers']['sprockets']['level'] = logging.INFO elif verbosity == 2: config['loggers']['sprockets']['level'] = logging.DEBUG # Add syslog if it's enabled if syslog: config['loggers']['sprockets']['handlers'].append('syslog') # Copy the sprockets logger to the application config['loggers'][application] = dict(config['loggers']['sprockets']) # Configure logging logging_config.dictConfig(config)
python
def _configure_logging(application, verbosity=0, syslog=False): """Configure logging for the application, setting the appropriate verbosity and adding syslog if it's enabled. :param str application: The application module/package name :param int verbosity: 1 == INFO, 2 == DEBUG :param bool syslog: Enable the syslog handler """ # Create a new copy of the logging config that will be modified config = dict(LOGGING) # Increase the logging verbosity if verbosity == 1: config['loggers']['sprockets']['level'] = logging.INFO elif verbosity == 2: config['loggers']['sprockets']['level'] = logging.DEBUG # Add syslog if it's enabled if syslog: config['loggers']['sprockets']['handlers'].append('syslog') # Copy the sprockets logger to the application config['loggers'][application] = dict(config['loggers']['sprockets']) # Configure logging logging_config.dictConfig(config)
[ "def", "_configure_logging", "(", "application", ",", "verbosity", "=", "0", ",", "syslog", "=", "False", ")", ":", "# Create a new copy of the logging config that will be modified", "config", "=", "dict", "(", "LOGGING", ")", "# Increase the logging verbosity", "if", "verbosity", "==", "1", ":", "config", "[", "'loggers'", "]", "[", "'sprockets'", "]", "[", "'level'", "]", "=", "logging", ".", "INFO", "elif", "verbosity", "==", "2", ":", "config", "[", "'loggers'", "]", "[", "'sprockets'", "]", "[", "'level'", "]", "=", "logging", ".", "DEBUG", "# Add syslog if it's enabled", "if", "syslog", ":", "config", "[", "'loggers'", "]", "[", "'sprockets'", "]", "[", "'handlers'", "]", ".", "append", "(", "'syslog'", ")", "# Copy the sprockets logger to the application", "config", "[", "'loggers'", "]", "[", "application", "]", "=", "dict", "(", "config", "[", "'loggers'", "]", "[", "'sprockets'", "]", ")", "# Configure logging", "logging_config", ".", "dictConfig", "(", "config", ")" ]
Configure logging for the application, setting the appropriate verbosity and adding syslog if it's enabled. :param str application: The application module/package name :param int verbosity: 1 == INFO, 2 == DEBUG :param bool syslog: Enable the syslog handler
[ "Configure", "logging", "for", "the", "application", "setting", "the", "appropriate", "verbosity", "and", "adding", "syslog", "if", "it", "s", "enabled", "." ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L161-L187
245,220
sprockets/sprockets
sprockets/cli.py
CLI._get_application_module
def _get_application_module(self, controller, application): """Return the module for an application. If it's a entry-point registered application name, return the module name from the entry points data. If not, the passed in application name is returned. :param str controller: The controller type :param str application: The application name or module :rtype: str """ for pkg in self._get_applications(controller): if pkg.name == application: return pkg.module_name return application
python
def _get_application_module(self, controller, application): """Return the module for an application. If it's a entry-point registered application name, return the module name from the entry points data. If not, the passed in application name is returned. :param str controller: The controller type :param str application: The application name or module :rtype: str """ for pkg in self._get_applications(controller): if pkg.name == application: return pkg.module_name return application
[ "def", "_get_application_module", "(", "self", ",", "controller", ",", "application", ")", ":", "for", "pkg", "in", "self", ".", "_get_applications", "(", "controller", ")", ":", "if", "pkg", ".", "name", "==", "application", ":", "return", "pkg", ".", "module_name", "return", "application" ]
Return the module for an application. If it's a entry-point registered application name, return the module name from the entry points data. If not, the passed in application name is returned. :param str controller: The controller type :param str application: The application name or module :rtype: str
[ "Return", "the", "module", "for", "an", "application", ".", "If", "it", "s", "a", "entry", "-", "point", "registered", "application", "name", "return", "the", "module", "name", "from", "the", "entry", "points", "data", ".", "If", "not", "the", "passed", "in", "application", "name", "is", "returned", "." ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L189-L202
245,221
sprockets/sprockets
sprockets/cli.py
CLI._get_controllers
def _get_controllers(self): """Iterate through the installed controller entry points and import the module and assign the handle to the CLI._controllers dict. :return: dict """ controllers = dict() for pkg in pkg_resources.iter_entry_points(group=self.CONTROLLERS): LOGGER.debug('Loading %s controller', pkg.name) controllers[pkg.name] = importlib.import_module(pkg.module_name) return controllers
python
def _get_controllers(self): """Iterate through the installed controller entry points and import the module and assign the handle to the CLI._controllers dict. :return: dict """ controllers = dict() for pkg in pkg_resources.iter_entry_points(group=self.CONTROLLERS): LOGGER.debug('Loading %s controller', pkg.name) controllers[pkg.name] = importlib.import_module(pkg.module_name) return controllers
[ "def", "_get_controllers", "(", "self", ")", ":", "controllers", "=", "dict", "(", ")", "for", "pkg", "in", "pkg_resources", ".", "iter_entry_points", "(", "group", "=", "self", ".", "CONTROLLERS", ")", ":", "LOGGER", ".", "debug", "(", "'Loading %s controller'", ",", "pkg", ".", "name", ")", "controllers", "[", "pkg", ".", "name", "]", "=", "importlib", ".", "import_module", "(", "pkg", ".", "module_name", ")", "return", "controllers" ]
Iterate through the installed controller entry points and import the module and assign the handle to the CLI._controllers dict. :return: dict
[ "Iterate", "through", "the", "installed", "controller", "entry", "points", "and", "import", "the", "module", "and", "assign", "the", "handle", "to", "the", "CLI", ".", "_controllers", "dict", "." ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L225-L236
245,222
sprockets/sprockets
sprockets/cli.py
CLI._get_controller_help
def _get_controller_help(self, controller): """Return the value of the HELP attribute for a controller that should describe the functionality of the controller. :rtype: str|None """ if hasattr(self._controllers[controller], 'HELP'): return self._controllers[controller].HELP return None
python
def _get_controller_help(self, controller): """Return the value of the HELP attribute for a controller that should describe the functionality of the controller. :rtype: str|None """ if hasattr(self._controllers[controller], 'HELP'): return self._controllers[controller].HELP return None
[ "def", "_get_controller_help", "(", "self", ",", "controller", ")", ":", "if", "hasattr", "(", "self", ".", "_controllers", "[", "controller", "]", ",", "'HELP'", ")", ":", "return", "self", ".", "_controllers", "[", "controller", "]", ".", "HELP", "return", "None" ]
Return the value of the HELP attribute for a controller that should describe the functionality of the controller. :rtype: str|None
[ "Return", "the", "value", "of", "the", "HELP", "attribute", "for", "a", "controller", "that", "should", "describe", "the", "functionality", "of", "the", "controller", "." ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L238-L247
245,223
sprockets/sprockets
sprockets/cli.py
CLI._print_installed_apps
def _print_installed_apps(self, controller): """Print out a list of installed sprockets applications :param str controller: The name of the controller to get apps for """ print('\nInstalled Sprockets %s Apps\n' % controller.upper()) print("{0:<25} {1:>25}".format('Name', 'Module')) print(string.ljust('', 51, '-')) for app in self._get_applications(controller): print('{0:<25} {1:>25}'.format(app.name, '(%s)' % app.module_name)) print('')
python
def _print_installed_apps(self, controller): """Print out a list of installed sprockets applications :param str controller: The name of the controller to get apps for """ print('\nInstalled Sprockets %s Apps\n' % controller.upper()) print("{0:<25} {1:>25}".format('Name', 'Module')) print(string.ljust('', 51, '-')) for app in self._get_applications(controller): print('{0:<25} {1:>25}'.format(app.name, '(%s)' % app.module_name)) print('')
[ "def", "_print_installed_apps", "(", "self", ",", "controller", ")", ":", "print", "(", "'\\nInstalled Sprockets %s Apps\\n'", "%", "controller", ".", "upper", "(", ")", ")", "print", "(", "\"{0:<25} {1:>25}\"", ".", "format", "(", "'Name'", ",", "'Module'", ")", ")", "print", "(", "string", ".", "ljust", "(", "''", ",", "51", ",", "'-'", ")", ")", "for", "app", "in", "self", ".", "_get_applications", "(", "controller", ")", ":", "print", "(", "'{0:<25} {1:>25}'", ".", "format", "(", "app", ".", "name", ",", "'(%s)'", "%", "app", ".", "module_name", ")", ")", "print", "(", "''", ")" ]
Print out a list of installed sprockets applications :param str controller: The name of the controller to get apps for
[ "Print", "out", "a", "list", "of", "installed", "sprockets", "applications" ]
089dbaf04da54afd95645fce31f4ff9c8bdd8fae
https://github.com/sprockets/sprockets/blob/089dbaf04da54afd95645fce31f4ff9c8bdd8fae/sprockets/cli.py#L249-L259
245,224
kankiri/pabiana
pabiana/abcs.py
Node.rslv
def rslv(self, interface: str, name: str=None) -> Tuple[str, int, Optional[str]]: """Return the IP address, port and optionally host IP for one of this Nodes interfaces.""" if name is None: name = self.name key = '{}-{}'.format(name, interface) host = None if 'host' in self.interfaces[key]: host = self.interfaces[key]['host'] return self.interfaces[key]['ip'], self.interfaces[key]['port'], host
python
def rslv(self, interface: str, name: str=None) -> Tuple[str, int, Optional[str]]: """Return the IP address, port and optionally host IP for one of this Nodes interfaces.""" if name is None: name = self.name key = '{}-{}'.format(name, interface) host = None if 'host' in self.interfaces[key]: host = self.interfaces[key]['host'] return self.interfaces[key]['ip'], self.interfaces[key]['port'], host
[ "def", "rslv", "(", "self", ",", "interface", ":", "str", ",", "name", ":", "str", "=", "None", ")", "->", "Tuple", "[", "str", ",", "int", ",", "Optional", "[", "str", "]", "]", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "name", "key", "=", "'{}-{}'", ".", "format", "(", "name", ",", "interface", ")", "host", "=", "None", "if", "'host'", "in", "self", ".", "interfaces", "[", "key", "]", ":", "host", "=", "self", ".", "interfaces", "[", "key", "]", "[", "'host'", "]", "return", "self", ".", "interfaces", "[", "key", "]", "[", "'ip'", "]", ",", "self", ".", "interfaces", "[", "key", "]", "[", "'port'", "]", ",", "host" ]
Return the IP address, port and optionally host IP for one of this Nodes interfaces.
[ "Return", "the", "IP", "address", "port", "and", "optionally", "host", "IP", "for", "one", "of", "this", "Nodes", "interfaces", "." ]
74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b
https://github.com/kankiri/pabiana/blob/74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b/pabiana/abcs.py#L23-L31
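A minimal standalone sketch mirroring the '{name}-{interface}' keyed lookup in Node.rslv above; the interface table, names and addresses are invented for illustration.

class DemoNode:
    """Stand-in with the same lookup behaviour as Node.rslv."""
    def __init__(self, name, interfaces):
        self.name = name
        self.interfaces = interfaces

    def rslv(self, interface, name=None):
        if name is None:
            name = self.name
        entry = self.interfaces['{}-{}'.format(name, interface)]
        return entry['ip'], entry['port'], entry.get('host')

node = DemoNode('area1', {
    'area1-pub': {'ip': '127.0.0.1', 'port': 8281},                     # no host entry
    'area1-rcv': {'ip': '127.0.0.1', 'port': 8282, 'host': '0.0.0.0'},
})
print(node.rslv('pub'))           # ('127.0.0.1', 8281, None)
print(node.rslv('rcv', 'area1'))  # ('127.0.0.1', 8282, '0.0.0.0')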
245,225
kankiri/pabiana
pabiana/abcs.py
Area.trigger
def trigger(self, target: str, trigger: str, parameters: Dict[str, Any]={}): """Calls the specified Trigger of another Area with the optionally given parameters. Args: target: The name of the target Area. trigger: The name of the Trigger. parameters: The parameters of the function call. """ pass
python
def trigger(self, target: str, trigger: str, parameters: Dict[str, Any]={}): """Calls the specified Trigger of another Area with the optionally given parameters. Args: target: The name of the target Area. trigger: The name of the Trigger. parameters: The parameters of the function call. """ pass
[ "def", "trigger", "(", "self", ",", "target", ":", "str", ",", "trigger", ":", "str", ",", "parameters", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", ")", ":", "pass" ]
Calls the specified Trigger of another Area with the optionally given parameters. Args: target: The name of the target Area. trigger: The name of the Trigger. parameters: The parameters of the function call.
[ "Calls", "the", "specified", "Trigger", "of", "another", "Area", "with", "the", "optionally", "given", "parameters", "." ]
74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b
https://github.com/kankiri/pabiana/blob/74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b/pabiana/abcs.py#L69-L77
245,226
iskandr/tinytimer
microbench/__init__.py
benchmark
def benchmark(f, n_repeats=3, warmup=True, name=""): """ Run the given function f repeatedly, return the average elapsed time. """ if warmup: f() total_time = 0 for i in range(n_repeats): iter_name = "%s (iter #%d)" % (name, i + 1,) with Timer(iter_name) as t: f() total_time += t.elapsed return total_time / n_repeats
python
def benchmark(f, n_repeats=3, warmup=True, name=""): """ Run the given function f repeatedly, return the average elapsed time. """ if warmup: f() total_time = 0 for i in range(n_repeats): iter_name = "%s (iter #%d)" % (name, i + 1,) with Timer(iter_name) as t: f() total_time += t.elapsed return total_time / n_repeats
[ "def", "benchmark", "(", "f", ",", "n_repeats", "=", "3", ",", "warmup", "=", "True", ",", "name", "=", "\"\"", ")", ":", "if", "warmup", ":", "f", "(", ")", "total_time", "=", "0", "for", "i", "in", "range", "(", "n_repeats", ")", ":", "iter_name", "=", "\"%s (iter #%d)\"", "%", "(", "name", ",", "i", "+", "1", ",", ")", "with", "Timer", "(", "iter_name", ")", "as", "t", ":", "f", "(", ")", "total_time", "+=", "t", ".", "elapsed", "return", "total_time", "/", "n_repeats" ]
Run the given function f repeatedly, return the average elapsed time.
[ "Run", "the", "given", "function", "f", "repeatedly", "return", "the", "average", "elapsed", "time", "." ]
ddedd6099e51bdccb2c506f34d31ff08aef15685
https://github.com/iskandr/tinytimer/blob/ddedd6099e51bdccb2c506f34d31ff08aef15685/microbench/__init__.py#L41-L54
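A minimal usage sketch, assuming the package above is installed; the import path is inferred from the record's module path (microbench/__init__.py) and the workload is a stand-in.

import time

from microbench import benchmark  # inferred module path; adjust if packaged differently

def work():
    time.sleep(0.01)  # stand-in for the code being measured

avg = benchmark(work, n_repeats=5, warmup=True, name="sleep-10ms")
print("average elapsed time:", avg)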
245,227
MakerReduxCorp/PLOD
PLOD/internal.py
convert_to_dict
def convert_to_dict(item): '''Examine an item of any type and return a true dictionary. If the item is already a dictionary, then the item is returned as-is. Easy. Otherwise, it attempts to interpret it. So far, this routine can handle: * a class, function, or anything with a .__dict__ entry * a legacy mongoEngine document (a class for MongoDb handling) * a list (index positions are used as keys) * a generic object that is iterable * a generic object with members .. versionadded:: 0.0.4 :param item: Any object such as a variable, instance, or function. :returns: A true dictionary. If unable to get convert 'item', then an empty dictionary '{}' is returned. ''' # get type actual_type = detect_type(item) # given the type, do conversion if actual_type=="dict": return item elif actual_type=="list": temp = {} ctr = 0 for entry in item: temp[ctr]=entry ctr += 1 return temp elif actual_type=="mongoengine": return item.__dict__['_data'] elif actual_type=="class": return item.__dict__ elif actual_type=="iterable_dict": # for a 'iterable_dict' create a real dictionary for a ALMOST-dict object. d = {} for key in item: # NO, you can't use iteritems(). The method might not exist. d[key] = item[key] return d elif actual_type=="object": tuples = getmembers(item) d = {} for (key, value) in tuples: d[key] = value return d return {}
python
def convert_to_dict(item): '''Examine an item of any type and return a true dictionary. If the item is already a dictionary, then the item is returned as-is. Easy. Otherwise, it attempts to interpret it. So far, this routine can handle: * a class, function, or anything with a .__dict__ entry * a legacy mongoEngine document (a class for MongoDb handling) * a list (index positions are used as keys) * a generic object that is iterable * a generic object with members .. versionadded:: 0.0.4 :param item: Any object such as a variable, instance, or function. :returns: A true dictionary. If unable to get convert 'item', then an empty dictionary '{}' is returned. ''' # get type actual_type = detect_type(item) # given the type, do conversion if actual_type=="dict": return item elif actual_type=="list": temp = {} ctr = 0 for entry in item: temp[ctr]=entry ctr += 1 return temp elif actual_type=="mongoengine": return item.__dict__['_data'] elif actual_type=="class": return item.__dict__ elif actual_type=="iterable_dict": # for a 'iterable_dict' create a real dictionary for a ALMOST-dict object. d = {} for key in item: # NO, you can't use iteritems(). The method might not exist. d[key] = item[key] return d elif actual_type=="object": tuples = getmembers(item) d = {} for (key, value) in tuples: d[key] = value return d return {}
[ "def", "convert_to_dict", "(", "item", ")", ":", "# get type", "actual_type", "=", "detect_type", "(", "item", ")", "# given the type, do conversion", "if", "actual_type", "==", "\"dict\"", ":", "return", "item", "elif", "actual_type", "==", "\"list\"", ":", "temp", "=", "{", "}", "ctr", "=", "0", "for", "entry", "in", "item", ":", "temp", "[", "ctr", "]", "=", "entry", "ctr", "+=", "1", "return", "temp", "elif", "actual_type", "==", "\"mongoengine\"", ":", "return", "item", ".", "__dict__", "[", "'_data'", "]", "elif", "actual_type", "==", "\"class\"", ":", "return", "item", ".", "__dict__", "elif", "actual_type", "==", "\"iterable_dict\"", ":", "# for a 'iterable_dict' create a real dictionary for a ALMOST-dict object.", "d", "=", "{", "}", "for", "key", "in", "item", ":", "# NO, you can't use iteritems(). The method might not exist.", "d", "[", "key", "]", "=", "item", "[", "key", "]", "return", "d", "elif", "actual_type", "==", "\"object\"", ":", "tuples", "=", "getmembers", "(", "item", ")", "d", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "tuples", ":", "d", "[", "key", "]", "=", "value", "return", "d", "return", "{", "}" ]
Examine an item of any type and return a true dictionary. If the item is already a dictionary, then the item is returned as-is. Easy. Otherwise, it attempts to interpret it. So far, this routine can handle: * a class, function, or anything with a .__dict__ entry * a legacy mongoEngine document (a class for MongoDb handling) * a list (index positions are used as keys) * a generic object that is iterable * a generic object with members .. versionadded:: 0.0.4 :param item: Any object such as a variable, instance, or function. :returns: A true dictionary. If unable to get convert 'item', then an empty dictionary '{}' is returned.
[ "Examine", "an", "item", "of", "any", "type", "and", "return", "a", "true", "dictionary", "." ]
707502cd928e5be6bd5e46d7f6de7da0e188cf1e
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/internal.py#L22-L70
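A usage sketch, assuming PLOD is installed (import path taken from the record's path, PLOD/internal.py); the exact branch taken for the class instance depends on detect_type, but the result is a plain dict either way.

from PLOD.internal import convert_to_dict

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(convert_to_dict({"a": 1}))         # dicts pass through unchanged
print(convert_to_dict(["red", "blue"]))  # lists become {0: 'red', 1: 'blue'}
print(convert_to_dict(Point(2, 3)))      # instance attributes end up as keys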
245,228
MakerReduxCorp/PLOD
PLOD/internal.py
do_op
def do_op(field, op, value): ''' used for comparisons ''' if op==NOOP: return True if field==None: if value==None: return True else: return False if value==None: return False if op==LESS: return (field < value) if op==LESSorEQUAL: return (field <= value) if op==GREATERorEQUAL: return (field >= value) if op==GREATER: return (field > value) # for the EQUAL and NOT_EQUAL conditions, additional factors are considered. # for EQUAL, # if they don't match AND the types don't match, # then the STR of the field and value is also tried if op==EQUAL: if (field == value): return True if type(field)==type(value): return False try: field = str(field) value = str(value) return (field == value) except: return False # for NOT_EQUAL, # if they match, then report False # if they don't match AND the types don't match, # then the STR equivalents are also tried. if op==NOT_EQUAL: if (field == value): return False if type(field)==type(value): return True try: field = str(field) value = str(value) return (field != value) except: return True return False
python
def do_op(field, op, value): ''' used for comparisons ''' if op==NOOP: return True if field==None: if value==None: return True else: return False if value==None: return False if op==LESS: return (field < value) if op==LESSorEQUAL: return (field <= value) if op==GREATERorEQUAL: return (field >= value) if op==GREATER: return (field > value) # for the EQUAL and NOT_EQUAL conditions, additional factors are considered. # for EQUAL, # if they don't match AND the types don't match, # then the STR of the field and value is also tried if op==EQUAL: if (field == value): return True if type(field)==type(value): return False try: field = str(field) value = str(value) return (field == value) except: return False # for NOT_EQUAL, # if they match, then report False # if they don't match AND the types don't match, # then the STR equivalents are also tried. if op==NOT_EQUAL: if (field == value): return False if type(field)==type(value): return True try: field = str(field) value = str(value) return (field != value) except: return True return False
[ "def", "do_op", "(", "field", ",", "op", ",", "value", ")", ":", "if", "op", "==", "NOOP", ":", "return", "True", "if", "field", "==", "None", ":", "if", "value", "==", "None", ":", "return", "True", "else", ":", "return", "False", "if", "value", "==", "None", ":", "return", "False", "if", "op", "==", "LESS", ":", "return", "(", "field", "<", "value", ")", "if", "op", "==", "LESSorEQUAL", ":", "return", "(", "field", "<=", "value", ")", "if", "op", "==", "GREATERorEQUAL", ":", "return", "(", "field", ">=", "value", ")", "if", "op", "==", "GREATER", ":", "return", "(", "field", ">", "value", ")", "# for the EQUAL and NOT_EQUAL conditions, additional factors are considered.", "# for EQUAL,", "# if they don't match AND the types don't match,", "# then the STR of the field and value is also tried", "if", "op", "==", "EQUAL", ":", "if", "(", "field", "==", "value", ")", ":", "return", "True", "if", "type", "(", "field", ")", "==", "type", "(", "value", ")", ":", "return", "False", "try", ":", "field", "=", "str", "(", "field", ")", "value", "=", "str", "(", "value", ")", "return", "(", "field", "==", "value", ")", "except", ":", "return", "False", "# for NOT_EQUAL,", "# if they match, then report False", "# if they don't match AND the types don't match,", "# then the STR equivalents are also tried.", "if", "op", "==", "NOT_EQUAL", ":", "if", "(", "field", "==", "value", ")", ":", "return", "False", "if", "type", "(", "field", ")", "==", "type", "(", "value", ")", ":", "return", "True", "try", ":", "field", "=", "str", "(", "field", ")", "value", "=", "str", "(", "value", ")", "return", "(", "field", "!=", "value", ")", "except", ":", "return", "True", "return", "False" ]
used for comparisons
[ "used", "for", "comparisons" ]
707502cd928e5be6bd5e46d7f6de7da0e188cf1e
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/internal.py#L212-L261
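A hypothetical usage sketch; it assumes PLOD is installed and that the comparison constants referenced in the source above (EQUAL, NOT_EQUAL, LESS, ...) are importable from the same module.

from PLOD.internal import do_op, EQUAL, NOT_EQUAL, LESS

print(do_op(3, LESS, 5))         # True
print(do_op(3, EQUAL, "3"))      # True: mismatched types fall back to comparing str() forms
print(do_op(None, EQUAL, None))  # True: two missing values count as equal
print(do_op(3, NOT_EQUAL, 4))    # True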
245,229
MakerReduxCorp/PLOD
PLOD/internal.py
get_value
def get_value(row, field_name): ''' Returns the value found in the field_name attribute of the row dictionary. ''' result = None dict_row = convert_to_dict(row) if detect_list(field_name): temp = row for field in field_name: dict_temp = convert_to_dict(temp) temp = dict_temp.get(field, None) result = temp else: result = dict_row.get(field_name, None) return result
python
def get_value(row, field_name): ''' Returns the value found in the field_name attribute of the row dictionary. ''' result = None dict_row = convert_to_dict(row) if detect_list(field_name): temp = row for field in field_name: dict_temp = convert_to_dict(temp) temp = dict_temp.get(field, None) result = temp else: result = dict_row.get(field_name, None) return result
[ "def", "get_value", "(", "row", ",", "field_name", ")", ":", "result", "=", "None", "dict_row", "=", "convert_to_dict", "(", "row", ")", "if", "detect_list", "(", "field_name", ")", ":", "temp", "=", "row", "for", "field", "in", "field_name", ":", "dict_temp", "=", "convert_to_dict", "(", "temp", ")", "temp", "=", "dict_temp", ".", "get", "(", "field", ",", "None", ")", "result", "=", "temp", "else", ":", "result", "=", "dict_row", ".", "get", "(", "field_name", ",", "None", ")", "return", "result" ]
Returns the value found in the field_name attribute of the row dictionary.
[ "Returns", "the", "value", "found", "in", "the", "field_name", "attribute", "of", "the", "row", "dictionary", "." ]
707502cd928e5be6bd5e46d7f6de7da0e188cf1e
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/internal.py#L280-L294
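A usage sketch, assuming PLOD is installed; passing a list of keys walks nested dictionaries one level per entry, and a missing field comes back as None.

from PLOD.internal import get_value

row = {"name": "Ada", "address": {"city": "London", "zip": "NW1"}}  # invented row
print(get_value(row, "name"))               # 'Ada'
print(get_value(row, ["address", "city"]))  # nested lookup -> 'London'
print(get_value(row, "missing"))            # None when the field is absent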
245,230
MakerReduxCorp/PLOD
PLOD/internal.py
select
def select(table, index_track, field_name, op, value, includeMissing): '''Modifies the table and index_track lists based on the comparison. ''' result = [] result_index = [] counter = 0 for row in table: if detect_fields(field_name, convert_to_dict(row)): final_value = get_value(row, field_name) if do_op(final_value, op, value): result.append(row) result_index.append(index_track[counter]) else: if includeMissing: result.append(row) result_index.append(index_track[counter]) counter += 1 #table = result #index_track = result_index return (result, result_index)
python
def select(table, index_track, field_name, op, value, includeMissing): '''Modifies the table and index_track lists based on the comparison. ''' result = [] result_index = [] counter = 0 for row in table: if detect_fields(field_name, convert_to_dict(row)): final_value = get_value(row, field_name) if do_op(final_value, op, value): result.append(row) result_index.append(index_track[counter]) else: if includeMissing: result.append(row) result_index.append(index_track[counter]) counter += 1 #table = result #index_track = result_index return (result, result_index)
[ "def", "select", "(", "table", ",", "index_track", ",", "field_name", ",", "op", ",", "value", ",", "includeMissing", ")", ":", "result", "=", "[", "]", "result_index", "=", "[", "]", "counter", "=", "0", "for", "row", "in", "table", ":", "if", "detect_fields", "(", "field_name", ",", "convert_to_dict", "(", "row", ")", ")", ":", "final_value", "=", "get_value", "(", "row", ",", "field_name", ")", "if", "do_op", "(", "final_value", ",", "op", ",", "value", ")", ":", "result", ".", "append", "(", "row", ")", "result_index", ".", "append", "(", "index_track", "[", "counter", "]", ")", "else", ":", "if", "includeMissing", ":", "result", ".", "append", "(", "row", ")", "result_index", ".", "append", "(", "index_track", "[", "counter", "]", ")", "counter", "+=", "1", "#table = result", "#index_track = result_index", "return", "(", "result", ",", "result_index", ")" ]
Modifies the table and index_track lists based on the comparison.
[ "Modifies", "the", "table", "and", "index_track", "lists", "based", "on", "the", "comparison", "." ]
707502cd928e5be6bd5e46d7f6de7da0e188cf1e
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/internal.py#L312-L331
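A usage sketch, assuming PLOD is installed and that EQUAL is the same comparison constant used by do_op above; the table rows are invented.

from PLOD.internal import select, EQUAL

table = [
    {"name": "Ada", "lang": "py"},
    {"name": "Bob", "lang": "js"},
    {"name": "Cyd"},               # no 'lang' field at all
]
rows, kept = select(table, [0, 1, 2], "lang", EQUAL, "py", True)
print(rows)  # the matching row, plus the field-less row because includeMissing is True
print(kept)  # the original indexes of the rows that were kept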
245,231
pudo-attic/loadkit
loadkit/logger.py
capture
def capture(package, prefix, modules=[], level=logging.DEBUG): """ Capture log messages for the given modules and archive them to a ``LogFile`` resource. """ handler = LogFileHandler(package, prefix) formatter = logging.Formatter(FORMAT) handler.setFormatter(formatter) modules = set(modules + ['loadkit']) for logger in modules: if not hasattr(logger, 'addHandler'): logger = logging.getLogger(logger) logger.setLevel(level=level) logger.addHandler(handler) return handler
python
def capture(package, prefix, modules=[], level=logging.DEBUG): """ Capture log messages for the given modules and archive them to a ``LogFile`` resource. """ handler = LogFileHandler(package, prefix) formatter = logging.Formatter(FORMAT) handler.setFormatter(formatter) modules = set(modules + ['loadkit']) for logger in modules: if not hasattr(logger, 'addHandler'): logger = logging.getLogger(logger) logger.setLevel(level=level) logger.addHandler(handler) return handler
[ "def", "capture", "(", "package", ",", "prefix", ",", "modules", "=", "[", "]", ",", "level", "=", "logging", ".", "DEBUG", ")", ":", "handler", "=", "LogFileHandler", "(", "package", ",", "prefix", ")", "formatter", "=", "logging", ".", "Formatter", "(", "FORMAT", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "modules", "=", "set", "(", "modules", "+", "[", "'loadkit'", "]", ")", "for", "logger", "in", "modules", ":", "if", "not", "hasattr", "(", "logger", ",", "'addHandler'", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "logger", ")", "logger", ".", "setLevel", "(", "level", "=", "level", ")", "logger", ".", "addHandler", "(", "handler", ")", "return", "handler" ]
Capture log messages for the given modules and archive them to a ``LogFile`` resource.
[ "Capture", "log", "messages", "for", "the", "given", "modules", "and", "archive", "them", "to", "a", "LogFile", "resource", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/logger.py#L32-L46
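capture() is essentially "attach one handler to a set of named loggers"; below is a standalone sketch of that wiring using only the stdlib (LogFileHandler and the package/prefix arguments are specific to loadkit and are left out here).

import logging

FORMAT = '%(asctime)s %(name)s %(levelname)s %(message)s'  # stand-in format string

def attach(handler, modules, level=logging.DEBUG):
    # Same loop as capture(): accept logger names or logger objects alike.
    for logger in set(modules):
        if not hasattr(logger, 'addHandler'):
            logger = logging.getLogger(logger)
        logger.setLevel(level)
        logger.addHandler(handler)
    return handler

stream = logging.StreamHandler()
stream.setFormatter(logging.Formatter(FORMAT))
attach(stream, ['myapp', 'loadkit'])
logging.getLogger('myapp').debug('captured by the shared handler')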
245,232
pudo-attic/loadkit
loadkit/logger.py
load
def load(package, prefix, offset=0, limit=1000): """ Load lines from the log file with pagination support. """ logs = package.all(LogFile, unicode(prefix)) logs = sorted(logs, key=lambda l: l.name, reverse=True) seen = 0 record = None tmp = tempfile.NamedTemporaryFile(suffix='.log') for log in logs: shutil.copyfileobj(log.fh(), tmp) tmp.seek(0) for line in reversed(list(tmp)): seen += 1 if seen < offset: continue if seen > limit: tmp.close() return try: d, mo, l, m = line.split(' %s ' % SEP, 4) if record is not None: yield record record = {'time': d, 'module': mo, 'level': l, 'message': m} except ValueError: if record is not None: record['message'] += '\n' + line tmp.seek(0) tmp.close() if record is not None: yield record
python
def load(package, prefix, offset=0, limit=1000): """ Load lines from the log file with pagination support. """ logs = package.all(LogFile, unicode(prefix)) logs = sorted(logs, key=lambda l: l.name, reverse=True) seen = 0 record = None tmp = tempfile.NamedTemporaryFile(suffix='.log') for log in logs: shutil.copyfileobj(log.fh(), tmp) tmp.seek(0) for line in reversed(list(tmp)): seen += 1 if seen < offset: continue if seen > limit: tmp.close() return try: d, mo, l, m = line.split(' %s ' % SEP, 4) if record is not None: yield record record = {'time': d, 'module': mo, 'level': l, 'message': m} except ValueError: if record is not None: record['message'] += '\n' + line tmp.seek(0) tmp.close() if record is not None: yield record
[ "def", "load", "(", "package", ",", "prefix", ",", "offset", "=", "0", ",", "limit", "=", "1000", ")", ":", "logs", "=", "package", ".", "all", "(", "LogFile", ",", "unicode", "(", "prefix", ")", ")", "logs", "=", "sorted", "(", "logs", ",", "key", "=", "lambda", "l", ":", "l", ".", "name", ",", "reverse", "=", "True", ")", "seen", "=", "0", "record", "=", "None", "tmp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.log'", ")", "for", "log", "in", "logs", ":", "shutil", ".", "copyfileobj", "(", "log", ".", "fh", "(", ")", ",", "tmp", ")", "tmp", ".", "seek", "(", "0", ")", "for", "line", "in", "reversed", "(", "list", "(", "tmp", ")", ")", ":", "seen", "+=", "1", "if", "seen", "<", "offset", ":", "continue", "if", "seen", ">", "limit", ":", "tmp", ".", "close", "(", ")", "return", "try", ":", "d", ",", "mo", ",", "l", ",", "m", "=", "line", ".", "split", "(", "' %s '", "%", "SEP", ",", "4", ")", "if", "record", "is", "not", "None", ":", "yield", "record", "record", "=", "{", "'time'", ":", "d", ",", "'module'", ":", "mo", ",", "'level'", ":", "l", ",", "'message'", ":", "m", "}", "except", "ValueError", ":", "if", "record", "is", "not", "None", ":", "record", "[", "'message'", "]", "+=", "'\\n'", "+", "line", "tmp", ".", "seek", "(", "0", ")", "tmp", ".", "close", "(", ")", "if", "record", "is", "not", "None", ":", "yield", "record" ]
Load lines from the log file with pagination support.
[ "Load", "lines", "from", "the", "log", "file", "with", "pagination", "support", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/logger.py#L49-L77
245,233
limpyd/redis-limpyd-jobs
example.py
clean
def clean(): """ Clean data created by this script """ for queue in MyQueue.collection().instances(): queue.delete() for job in MyJob.collection().instances(): job.delete() for person in Person.collection().instances(): person.delete()
python
def clean(): """ Clean data created by this script """ for queue in MyQueue.collection().instances(): queue.delete() for job in MyJob.collection().instances(): job.delete() for person in Person.collection().instances(): person.delete()
[ "def", "clean", "(", ")", ":", "for", "queue", "in", "MyQueue", ".", "collection", "(", ")", ".", "instances", "(", ")", ":", "queue", ".", "delete", "(", ")", "for", "job", "in", "MyJob", ".", "collection", "(", ")", ".", "instances", "(", ")", ":", "job", ".", "delete", "(", ")", "for", "person", "in", "Person", ".", "collection", "(", ")", ".", "instances", "(", ")", ":", "person", ".", "delete", "(", ")" ]
Clean data created by this script
[ "Clean", "data", "created", "by", "this", "script" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/example.py#L164-L173
245,234
limpyd/redis-limpyd-jobs
example.py
MyJob.run
def run(self, queue): """ Create the fullname, and store a a message serving as result in the job """ # add some random time to simulate a long job time.sleep(random.random()) # compute the fullname obj = self.get_object() obj.fullname.hset('%s %s' % tuple(obj.hmget('firstname', 'lastname'))) # this will the "result" of the job result = 'Created fullname for Person %s: %s' % (obj.pk.get(), obj.fullname.hget()) # save the result of the callback in the job itself self.result.set(result) # return the result for future use in the worker return result
python
def run(self, queue): """ Create the fullname, and store a a message serving as result in the job """ # add some random time to simulate a long job time.sleep(random.random()) # compute the fullname obj = self.get_object() obj.fullname.hset('%s %s' % tuple(obj.hmget('firstname', 'lastname'))) # this will the "result" of the job result = 'Created fullname for Person %s: %s' % (obj.pk.get(), obj.fullname.hget()) # save the result of the callback in the job itself self.result.set(result) # return the result for future use in the worker return result
[ "def", "run", "(", "self", ",", "queue", ")", ":", "# add some random time to simulate a long job", "time", ".", "sleep", "(", "random", ".", "random", "(", ")", ")", "# compute the fullname", "obj", "=", "self", ".", "get_object", "(", ")", "obj", ".", "fullname", ".", "hset", "(", "'%s %s'", "%", "tuple", "(", "obj", ".", "hmget", "(", "'firstname'", ",", "'lastname'", ")", ")", ")", "# this will the \"result\" of the job", "result", "=", "'Created fullname for Person %s: %s'", "%", "(", "obj", ".", "pk", ".", "get", "(", ")", ",", "obj", ".", "fullname", ".", "hget", "(", ")", ")", "# save the result of the callback in the job itself", "self", ".", "result", ".", "set", "(", "result", ")", "# return the result for future use in the worker", "return", "result" ]
Create the fullname, and store a a message serving as result in the job
[ "Create", "the", "fullname", "and", "store", "a", "a", "message", "serving", "as", "result", "in", "the", "job" ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/example.py#L58-L76
245,235
limpyd/redis-limpyd-jobs
example.py
FullNameWorker.job_success
def job_success(self, job, queue, job_result): """ Update the queue's dates and number of jobs managed, and save into the job the result received by the callback. """ # display what was done obj = job.get_object() message = '[%s|%s] %s [%s]' % (queue.name.hget(), obj.pk.get(), job_result, threading.current_thread().name) self.log(message) # default stuff: update job and queue statuses, and do logging super(FullNameWorker, self).job_success(job, queue, job_result) # update the queue's dates queue_fields_to_update = { 'last_job_date': str(datetime.utcnow()) } if not queue.first_job_date.hget(): queue_fields_to_update['first_job_date'] = queue_fields_to_update['last_job_date'] queue.hmset(**queue_fields_to_update) # update the jobs counter on the queue queue.jobs_counter.hincrby(1) # save a ref to the job to display at the end of this example script self.jobs.append(int(job.pk.get()))
python
def job_success(self, job, queue, job_result): """ Update the queue's dates and number of jobs managed, and save into the job the result received by the callback. """ # display what was done obj = job.get_object() message = '[%s|%s] %s [%s]' % (queue.name.hget(), obj.pk.get(), job_result, threading.current_thread().name) self.log(message) # default stuff: update job and queue statuses, and do logging super(FullNameWorker, self).job_success(job, queue, job_result) # update the queue's dates queue_fields_to_update = { 'last_job_date': str(datetime.utcnow()) } if not queue.first_job_date.hget(): queue_fields_to_update['first_job_date'] = queue_fields_to_update['last_job_date'] queue.hmset(**queue_fields_to_update) # update the jobs counter on the queue queue.jobs_counter.hincrby(1) # save a ref to the job to display at the end of this example script self.jobs.append(int(job.pk.get()))
[ "def", "job_success", "(", "self", ",", "job", ",", "queue", ",", "job_result", ")", ":", "# display what was done", "obj", "=", "job", ".", "get_object", "(", ")", "message", "=", "'[%s|%s] %s [%s]'", "%", "(", "queue", ".", "name", ".", "hget", "(", ")", ",", "obj", ".", "pk", ".", "get", "(", ")", ",", "job_result", ",", "threading", ".", "current_thread", "(", ")", ".", "name", ")", "self", ".", "log", "(", "message", ")", "# default stuff: update job and queue statuses, and do logging", "super", "(", "FullNameWorker", ",", "self", ")", ".", "job_success", "(", "job", ",", "queue", ",", "job_result", ")", "# update the queue's dates", "queue_fields_to_update", "=", "{", "'last_job_date'", ":", "str", "(", "datetime", ".", "utcnow", "(", ")", ")", "}", "if", "not", "queue", ".", "first_job_date", ".", "hget", "(", ")", ":", "queue_fields_to_update", "[", "'first_job_date'", "]", "=", "queue_fields_to_update", "[", "'last_job_date'", "]", "queue", ".", "hmset", "(", "*", "*", "queue_fields_to_update", ")", "# update the jobs counter on the queue", "queue", ".", "jobs_counter", ".", "hincrby", "(", "1", ")", "# save a ref to the job to display at the end of this example script", "self", ".", "jobs", ".", "append", "(", "int", "(", "job", ".", "pk", ".", "get", "(", ")", ")", ")" ]
Update the queue's dates and number of jobs managed, and save into the job the result received by the callback.
[ "Update", "the", "queue", "s", "dates", "and", "number", "of", "jobs", "managed", "and", "save", "into", "the", "job", "the", "result", "received", "by", "the", "callback", "." ]
264c71029bad4377d6132bf8bb9c55c44f3b03a2
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/example.py#L122-L151
245,236
quasipedia/swaggery
swaggery/introspection/introspection.py
ResourceListing.resource_listing
def resource_listing(cls, request) -> [(200, 'Ok', ResourceListingModel)]: '''Return the list of all available resources on the system. Resources are filtered according to the permission system, so querying this resource as different users may bare different results.''' apis = [api.get_swagger_fragment() for api in Api if not api.private] Respond(200, { 'apiVersion': cls.api.version, 'swaggerVersion': cls.api.swagger_version, 'apis': apis })
python
def resource_listing(cls, request) -> [(200, 'Ok', ResourceListingModel)]: '''Return the list of all available resources on the system. Resources are filtered according to the permission system, so querying this resource as different users may bare different results.''' apis = [api.get_swagger_fragment() for api in Api if not api.private] Respond(200, { 'apiVersion': cls.api.version, 'swaggerVersion': cls.api.swagger_version, 'apis': apis })
[ "def", "resource_listing", "(", "cls", ",", "request", ")", "->", "[", "(", "200", ",", "'Ok'", ",", "ResourceListingModel", ")", "]", ":", "apis", "=", "[", "api", ".", "get_swagger_fragment", "(", ")", "for", "api", "in", "Api", "if", "not", "api", ".", "private", "]", "Respond", "(", "200", ",", "{", "'apiVersion'", ":", "cls", ".", "api", ".", "version", ",", "'swaggerVersion'", ":", "cls", ".", "api", ".", "swagger_version", ",", "'apis'", ":", "apis", "}", ")" ]
Return the list of all available resources on the system. Resources are filtered according to the permission system, so querying this resource as different users may bare different results.
[ "Return", "the", "list", "of", "all", "available", "resources", "on", "the", "system", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/introspection/introspection.py#L45-L55
245,237
quasipedia/swaggery
swaggery/introspection/introspection.py
ApiDeclaration._extract_models
def _extract_models(cls, apis): '''An helper function to extract all used models from the apis.''' # TODO: This would probably be much better if the info would be # extracted from the classes, rather than from the swagger # representation... models = set() for api in apis: for op in api.get('operations', []): models.add(op['type']) for param in op.get('parameters', []): models.add(param.get('type', 'void')) for msg in op['responseMessages']: models.add(msg.get('responseModel', 'void')) # Convert from swagger name representation to classes models = map(lambda m: Model.name_to_cls[m], models) ret = {} for model in models: if model.native_type: continue obj = model.schema.copy() obj['id'] = model.name ret[model.name] = obj return ret
python
def _extract_models(cls, apis): '''An helper function to extract all used models from the apis.''' # TODO: This would probably be much better if the info would be # extracted from the classes, rather than from the swagger # representation... models = set() for api in apis: for op in api.get('operations', []): models.add(op['type']) for param in op.get('parameters', []): models.add(param.get('type', 'void')) for msg in op['responseMessages']: models.add(msg.get('responseModel', 'void')) # Convert from swagger name representation to classes models = map(lambda m: Model.name_to_cls[m], models) ret = {} for model in models: if model.native_type: continue obj = model.schema.copy() obj['id'] = model.name ret[model.name] = obj return ret
[ "def", "_extract_models", "(", "cls", ",", "apis", ")", ":", "# TODO: This would probably be much better if the info would be", "# extracted from the classes, rather than from the swagger", "# representation...", "models", "=", "set", "(", ")", "for", "api", "in", "apis", ":", "for", "op", "in", "api", ".", "get", "(", "'operations'", ",", "[", "]", ")", ":", "models", ".", "add", "(", "op", "[", "'type'", "]", ")", "for", "param", "in", "op", ".", "get", "(", "'parameters'", ",", "[", "]", ")", ":", "models", ".", "add", "(", "param", ".", "get", "(", "'type'", ",", "'void'", ")", ")", "for", "msg", "in", "op", "[", "'responseMessages'", "]", ":", "models", ".", "add", "(", "msg", ".", "get", "(", "'responseModel'", ",", "'void'", ")", ")", "# Convert from swagger name representation to classes", "models", "=", "map", "(", "lambda", "m", ":", "Model", ".", "name_to_cls", "[", "m", "]", ",", "models", ")", "ret", "=", "{", "}", "for", "model", "in", "models", ":", "if", "model", ".", "native_type", ":", "continue", "obj", "=", "model", ".", "schema", ".", "copy", "(", ")", "obj", "[", "'id'", "]", "=", "model", ".", "name", "ret", "[", "model", ".", "name", "]", "=", "obj", "return", "ret" ]
An helper function to extract all used models from the apis.
[ "An", "helper", "function", "to", "extract", "all", "used", "models", "from", "the", "apis", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/introspection/introspection.py#L68-L90
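The traversal above first collects every type name referenced by operations, parameters and response messages before resolving them against the Model registry; here is a standalone sketch of that collection step, run on an invented swagger-style fragment.

def collect_model_names(apis):
    # Mirrors the gathering loop in _extract_models (without the Model lookup).
    names = set()
    for api in apis:
        for op in api.get('operations', []):
            names.add(op['type'])
            for param in op.get('parameters', []):
                names.add(param.get('type', 'void'))
            for msg in op['responseMessages']:
                names.add(msg.get('responseModel', 'void'))
    return names

fragment = [{
    'operations': [{
        'type': 'ResourceListingModel',
        'parameters': [{'type': 'String'}],
        'responseMessages': [{'code': 200, 'responseModel': 'ResourceListingModel'},
                             {'code': 404}],
    }],
}]
print(collect_model_names(fragment))  # {'ResourceListingModel', 'String', 'void'}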
245,238
quasipedia/swaggery
swaggery/introspection/introspection.py
ApiDeclaration.api_declaration
def api_declaration( cls, request, api_path: (Ptypes.path, String('The path for the info on the resource.'))) -> [ (200, 'Ok', ApiDeclarationModel), (404, 'Not a valid resource.')]: '''Return the complete specification of a single API. Resources are filtered according to the permission system, so querying this resource as different users may bare different results. ''' if api_path in cls.__cache: Respond(200, cls.__cache[api_path]) # Select the resources belonging to this API resources = tuple(filter(lambda ep: ep.api.path == api_path, Resource)) if not resources: Respond(404) apis = [r.get_swagger_fragment() for r in resources if not r.private] cls.__cache[api_path] = { 'apiVersion': cls.api.version, 'swaggerVersion': cls.api.swagger_version, 'basePath': request.url_root[:-1], # Remove trailing slash 'resourcePath': '/{}'.format(api_path), 'apis': apis, 'models': cls._extract_models(apis), 'consumes': ['application/json'], 'produces': ['application/json'] } Respond(200, cls.__cache[api_path])
python
def api_declaration( cls, request, api_path: (Ptypes.path, String('The path for the info on the resource.'))) -> [ (200, 'Ok', ApiDeclarationModel), (404, 'Not a valid resource.')]: '''Return the complete specification of a single API. Resources are filtered according to the permission system, so querying this resource as different users may bare different results. ''' if api_path in cls.__cache: Respond(200, cls.__cache[api_path]) # Select the resources belonging to this API resources = tuple(filter(lambda ep: ep.api.path == api_path, Resource)) if not resources: Respond(404) apis = [r.get_swagger_fragment() for r in resources if not r.private] cls.__cache[api_path] = { 'apiVersion': cls.api.version, 'swaggerVersion': cls.api.swagger_version, 'basePath': request.url_root[:-1], # Remove trailing slash 'resourcePath': '/{}'.format(api_path), 'apis': apis, 'models': cls._extract_models(apis), 'consumes': ['application/json'], 'produces': ['application/json'] } Respond(200, cls.__cache[api_path])
[ "def", "api_declaration", "(", "cls", ",", "request", ",", "api_path", ":", "(", "Ptypes", ".", "path", ",", "String", "(", "'The path for the info on the resource.'", ")", ")", ")", "->", "[", "(", "200", ",", "'Ok'", ",", "ApiDeclarationModel", ")", ",", "(", "404", ",", "'Not a valid resource.'", ")", "]", ":", "if", "api_path", "in", "cls", ".", "__cache", ":", "Respond", "(", "200", ",", "cls", ".", "__cache", "[", "api_path", "]", ")", "# Select the resources belonging to this API", "resources", "=", "tuple", "(", "filter", "(", "lambda", "ep", ":", "ep", ".", "api", ".", "path", "==", "api_path", ",", "Resource", ")", ")", "if", "not", "resources", ":", "Respond", "(", "404", ")", "apis", "=", "[", "r", ".", "get_swagger_fragment", "(", ")", "for", "r", "in", "resources", "if", "not", "r", ".", "private", "]", "cls", ".", "__cache", "[", "api_path", "]", "=", "{", "'apiVersion'", ":", "cls", ".", "api", ".", "version", ",", "'swaggerVersion'", ":", "cls", ".", "api", ".", "swagger_version", ",", "'basePath'", ":", "request", ".", "url_root", "[", ":", "-", "1", "]", ",", "# Remove trailing slash", "'resourcePath'", ":", "'/{}'", ".", "format", "(", "api_path", ")", ",", "'apis'", ":", "apis", ",", "'models'", ":", "cls", ".", "_extract_models", "(", "apis", ")", ",", "'consumes'", ":", "[", "'application/json'", "]", ",", "'produces'", ":", "[", "'application/json'", "]", "}", "Respond", "(", "200", ",", "cls", ".", "__cache", "[", "api_path", "]", ")" ]
Return the complete specification of a single API. Resources are filtered according to the permission system, so querying this resource as different users may bare different results.
[ "Return", "the", "complete", "specification", "of", "a", "single", "API", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/introspection/introspection.py#L93-L121
245,239
ppo/django-guitar
guitar/middlewares.py
GuitarMiddleware._get_route_info
def _get_route_info(self, request): """Return information about the current URL.""" resolve_match = resolve(request.path) app_name = resolve_match.app_name # The application namespace for the URL pattern that matches the URL. namespace = resolve_match.namespace # The instance namespace for the URL pattern that matches the URL. url_name = resolve_match.url_name # The name of the URL pattern that matches the URL. view_name = resolve_match.view_name # Name of the view that matches the URL, incl. namespace if there's one. return { "app_name": app_name or None, "namespace": namespace or None, "url_name": url_name or None, "view_name": view_name or None, }
python
def _get_route_info(self, request): """Return information about the current URL.""" resolve_match = resolve(request.path) app_name = resolve_match.app_name # The application namespace for the URL pattern that matches the URL. namespace = resolve_match.namespace # The instance namespace for the URL pattern that matches the URL. url_name = resolve_match.url_name # The name of the URL pattern that matches the URL. view_name = resolve_match.view_name # Name of the view that matches the URL, incl. namespace if there's one. return { "app_name": app_name or None, "namespace": namespace or None, "url_name": url_name or None, "view_name": view_name or None, }
[ "def", "_get_route_info", "(", "self", ",", "request", ")", ":", "resolve_match", "=", "resolve", "(", "request", ".", "path", ")", "app_name", "=", "resolve_match", ".", "app_name", "# The application namespace for the URL pattern that matches the URL.", "namespace", "=", "resolve_match", ".", "namespace", "# The instance namespace for the URL pattern that matches the URL.", "url_name", "=", "resolve_match", ".", "url_name", "# The name of the URL pattern that matches the URL.", "view_name", "=", "resolve_match", ".", "view_name", "# Name of the view that matches the URL, incl. namespace if there's one.", "return", "{", "\"app_name\"", ":", "app_name", "or", "None", ",", "\"namespace\"", ":", "namespace", "or", "None", ",", "\"url_name\"", ":", "url_name", "or", "None", ",", "\"view_name\"", ":", "view_name", "or", "None", ",", "}" ]
Return information about the current URL.
[ "Return", "information", "about", "the", "current", "URL", "." ]
857282219c0c4ff5907c3ad04ef012281d245348
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/middlewares.py#L24-L38
245,240
delfick/aws_syncr
aws_syncr/amazon/amazon.py
Amazon.validate_account
def validate_account(self): """Make sure we are able to connect to the right account""" self._validating = True with self.catch_invalid_credentials(): log.info("Finding a role to check the account id") a_role = list(self.iam.resource.roles.limit(1)) if not a_role: raise AwsSyncrError("Couldn't find an iam role, can't validate the account....") account_id = a_role[0].meta.data['Arn'].split(":", 5)[4] chosen_account = self.accounts[self.environment] if chosen_account != account_id: raise BadCredentials("Don't have credentials for the correct account!", wanted=chosen_account, got=account_id) self._validating = False self._validated = True
python
def validate_account(self): """Make sure we are able to connect to the right account""" self._validating = True with self.catch_invalid_credentials(): log.info("Finding a role to check the account id") a_role = list(self.iam.resource.roles.limit(1)) if not a_role: raise AwsSyncrError("Couldn't find an iam role, can't validate the account....") account_id = a_role[0].meta.data['Arn'].split(":", 5)[4] chosen_account = self.accounts[self.environment] if chosen_account != account_id: raise BadCredentials("Don't have credentials for the correct account!", wanted=chosen_account, got=account_id) self._validating = False self._validated = True
[ "def", "validate_account", "(", "self", ")", ":", "self", ".", "_validating", "=", "True", "with", "self", ".", "catch_invalid_credentials", "(", ")", ":", "log", ".", "info", "(", "\"Finding a role to check the account id\"", ")", "a_role", "=", "list", "(", "self", ".", "iam", ".", "resource", ".", "roles", ".", "limit", "(", "1", ")", ")", "if", "not", "a_role", ":", "raise", "AwsSyncrError", "(", "\"Couldn't find an iam role, can't validate the account....\"", ")", "account_id", "=", "a_role", "[", "0", "]", ".", "meta", ".", "data", "[", "'Arn'", "]", ".", "split", "(", "\":\"", ",", "5", ")", "[", "4", "]", "chosen_account", "=", "self", ".", "accounts", "[", "self", ".", "environment", "]", "if", "chosen_account", "!=", "account_id", ":", "raise", "BadCredentials", "(", "\"Don't have credentials for the correct account!\"", ",", "wanted", "=", "chosen_account", ",", "got", "=", "account_id", ")", "self", ".", "_validating", "=", "False", "self", ".", "_validated", "=", "True" ]
Make sure we are able to connect to the right account
[ "Make", "sure", "we", "are", "able", "to", "connect", "to", "the", "right", "account" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/amazon/amazon.py#L46-L61
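The account check above hinges on slicing the account id out of a role ARN; on a made-up ARN that slice works like this.

arn = 'arn:aws:iam::123456789012:role/example-role'  # invented ARN, real colon layout
account_id = arn.split(':', 5)[4]
print(account_id)  # '123456789012'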
245,241
rameshg87/pyremotevbox
pyremotevbox/ZSI/ServiceContainer.py
SOAPRequestHandler.do_GET
def do_GET(self): '''The GET command. ''' if self.path.lower().endswith("?wsdl"): service_path = self.path[:-5] service = self.server.getNode(service_path) if hasattr(service, "_wsdl"): wsdl = service._wsdl # update the soap:location tag in the wsdl to the actual server # location # - default to 'http' as protocol, or use server-specified protocol proto = 'http' if hasattr(self.server,'proto'): proto = self.server.proto serviceUrl = '%s://%s:%d%s' % (proto, self.server.server_name, self.server.server_port, service_path) soapAddress = '<soap:address location="%s"/>' % serviceUrl wsdlre = re.compile('\<soap:address[^\>]*>',re.IGNORECASE) wsdl = re.sub(wsdlre,soapAddress,wsdl) self.send_xml(wsdl) else: self.send_error(404, "WSDL not available for that service [%s]." % self.path) else: self.send_error(404, "Service not found [%s]." % self.path)
python
def do_GET(self): '''The GET command. ''' if self.path.lower().endswith("?wsdl"): service_path = self.path[:-5] service = self.server.getNode(service_path) if hasattr(service, "_wsdl"): wsdl = service._wsdl # update the soap:location tag in the wsdl to the actual server # location # - default to 'http' as protocol, or use server-specified protocol proto = 'http' if hasattr(self.server,'proto'): proto = self.server.proto serviceUrl = '%s://%s:%d%s' % (proto, self.server.server_name, self.server.server_port, service_path) soapAddress = '<soap:address location="%s"/>' % serviceUrl wsdlre = re.compile('\<soap:address[^\>]*>',re.IGNORECASE) wsdl = re.sub(wsdlre,soapAddress,wsdl) self.send_xml(wsdl) else: self.send_error(404, "WSDL not available for that service [%s]." % self.path) else: self.send_error(404, "Service not found [%s]." % self.path)
[ "def", "do_GET", "(", "self", ")", ":", "if", "self", ".", "path", ".", "lower", "(", ")", ".", "endswith", "(", "\"?wsdl\"", ")", ":", "service_path", "=", "self", ".", "path", "[", ":", "-", "5", "]", "service", "=", "self", ".", "server", ".", "getNode", "(", "service_path", ")", "if", "hasattr", "(", "service", ",", "\"_wsdl\"", ")", ":", "wsdl", "=", "service", ".", "_wsdl", "# update the soap:location tag in the wsdl to the actual server", "# location", "# - default to 'http' as protocol, or use server-specified protocol", "proto", "=", "'http'", "if", "hasattr", "(", "self", ".", "server", ",", "'proto'", ")", ":", "proto", "=", "self", ".", "server", ".", "proto", "serviceUrl", "=", "'%s://%s:%d%s'", "%", "(", "proto", ",", "self", ".", "server", ".", "server_name", ",", "self", ".", "server", ".", "server_port", ",", "service_path", ")", "soapAddress", "=", "'<soap:address location=\"%s\"/>'", "%", "serviceUrl", "wsdlre", "=", "re", ".", "compile", "(", "'\\<soap:address[^\\>]*>'", ",", "re", ".", "IGNORECASE", ")", "wsdl", "=", "re", ".", "sub", "(", "wsdlre", ",", "soapAddress", ",", "wsdl", ")", "self", ".", "send_xml", "(", "wsdl", ")", "else", ":", "self", ".", "send_error", "(", "404", ",", "\"WSDL not available for that service [%s].\"", "%", "self", ".", "path", ")", "else", ":", "self", ".", "send_error", "(", "404", ",", "\"Service not found [%s].\"", "%", "self", ".", "path", ")" ]
The GET command.
[ "The", "GET", "command", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/ServiceContainer.py#L357-L382
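A standalone sketch of the soap:address rewrite performed above, on a made-up WSDL fragment and an invented server URL.

import re

wsdl = '<wsdl:service><soap:address location="http://placeholder/"/></wsdl:service>'
service_url = 'http://localhost:18083/vbox'  # hypothetical proto://host:port/path
soap_address = '<soap:address location="%s"/>' % service_url
wsdlre = re.compile(r'\<soap:address[^\>]*>', re.IGNORECASE)
print(re.sub(wsdlre, soap_address, wsdl))
# -> <wsdl:service><soap:address location="http://localhost:18083/vbox"/></wsdl:service>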
245,242
laurenceputra/mongo_notebook_manager
mongo_notebook_manager/__init__.py
MongoNotebookManager.get_notebook_names
def get_notebook_names(self, path=''): """List all notebook names in the notebook dir and path.""" path = path.strip('/') spec = {'path': path, 'type': 'notebook'} fields = {'name': 1} notebooks = list(self._connect_collection(self.notebook_collection).find(spec,fields)) names = [n['name'] for n in notebooks] return names
python
def get_notebook_names(self, path=''): """List all notebook names in the notebook dir and path.""" path = path.strip('/') spec = {'path': path, 'type': 'notebook'} fields = {'name': 1} notebooks = list(self._connect_collection(self.notebook_collection).find(spec,fields)) names = [n['name'] for n in notebooks] return names
[ "def", "get_notebook_names", "(", "self", ",", "path", "=", "''", ")", ":", "path", "=", "path", ".", "strip", "(", "'/'", ")", "spec", "=", "{", "'path'", ":", "path", ",", "'type'", ":", "'notebook'", "}", "fields", "=", "{", "'name'", ":", "1", "}", "notebooks", "=", "list", "(", "self", ".", "_connect_collection", "(", "self", ".", "notebook_collection", ")", ".", "find", "(", "spec", ",", "fields", ")", ")", "names", "=", "[", "n", "[", "'name'", "]", "for", "n", "in", "notebooks", "]", "return", "names" ]
List all notebook names in the notebook dir and path.
[ "List", "all", "notebook", "names", "in", "the", "notebook", "dir", "and", "path", "." ]
d7f4031e236ff19b8f0658f8ad1fcd1b51815251
https://github.com/laurenceputra/mongo_notebook_manager/blob/d7f4031e236ff19b8f0658f8ad1fcd1b51815251/mongo_notebook_manager/__init__.py#L65-L73
245,243
laurenceputra/mongo_notebook_manager
mongo_notebook_manager/__init__.py
MongoNotebookManager.create_notebook
def create_notebook(self, model=None, path=''): """Create a new notebook and return its model with no content.""" path = path.strip('/') if model is None: model = {} if 'content' not in model: metadata = current.new_metadata(name=u'') model['content'] = current.new_notebook(metadata=metadata) if 'name' not in model: model['name'] = self.increment_filename('Untitled', path) model['path'] = path model['type'] = 'notebook' model = self.save_notebook(model, model['name'], model['path']) return model
python
def create_notebook(self, model=None, path=''): """Create a new notebook and return its model with no content.""" path = path.strip('/') if model is None: model = {} if 'content' not in model: metadata = current.new_metadata(name=u'') model['content'] = current.new_notebook(metadata=metadata) if 'name' not in model: model['name'] = self.increment_filename('Untitled', path) model['path'] = path model['type'] = 'notebook' model = self.save_notebook(model, model['name'], model['path']) return model
[ "def", "create_notebook", "(", "self", ",", "model", "=", "None", ",", "path", "=", "''", ")", ":", "path", "=", "path", ".", "strip", "(", "'/'", ")", "if", "model", "is", "None", ":", "model", "=", "{", "}", "if", "'content'", "not", "in", "model", ":", "metadata", "=", "current", ".", "new_metadata", "(", "name", "=", "u''", ")", "model", "[", "'content'", "]", "=", "current", ".", "new_notebook", "(", "metadata", "=", "metadata", ")", "if", "'name'", "not", "in", "model", ":", "model", "[", "'name'", "]", "=", "self", ".", "increment_filename", "(", "'Untitled'", ",", "path", ")", "model", "[", "'path'", "]", "=", "path", "model", "[", "'type'", "]", "=", "'notebook'", "model", "=", "self", ".", "save_notebook", "(", "model", ",", "model", "[", "'name'", "]", ",", "model", "[", "'path'", "]", ")", "return", "model" ]
Create a new notebook and return its model with no content.
[ "Create", "a", "new", "notebook", "and", "return", "its", "model", "with", "no", "content", "." ]
d7f4031e236ff19b8f0658f8ad1fcd1b51815251
https://github.com/laurenceputra/mongo_notebook_manager/blob/d7f4031e236ff19b8f0658f8ad1fcd1b51815251/mongo_notebook_manager/__init__.py#L202-L217
245,244
mweb/appconfig
appconfig/appconfig.py
_get_type
def _get_type(stype): ''' Get the python type for a given string describtion for a type. @param stype: The string representing the type to return @return: The python type if available ''' stype = stype.lower() if stype == 'str': return str if stype == 'unicode': if PY2: return unicode else: return str if stype == 'int': return int if stype == 'float': return float if stype == 'bool': return bool raise AppConfigValueException('Unsuported type given: {0}'.format(stype))
python
def _get_type(stype): ''' Get the python type for a given string describtion for a type. @param stype: The string representing the type to return @return: The python type if available ''' stype = stype.lower() if stype == 'str': return str if stype == 'unicode': if PY2: return unicode else: return str if stype == 'int': return int if stype == 'float': return float if stype == 'bool': return bool raise AppConfigValueException('Unsuported type given: {0}'.format(stype))
[ "def", "_get_type", "(", "stype", ")", ":", "stype", "=", "stype", ".", "lower", "(", ")", "if", "stype", "==", "'str'", ":", "return", "str", "if", "stype", "==", "'unicode'", ":", "if", "PY2", ":", "return", "unicode", "else", ":", "return", "str", "if", "stype", "==", "'int'", ":", "return", "int", "if", "stype", "==", "'float'", ":", "return", "float", "if", "stype", "==", "'bool'", ":", "return", "bool", "raise", "AppConfigValueException", "(", "'Unsuported type given: {0}'", ".", "format", "(", "stype", ")", ")" ]
Get the python type for a given string describtion for a type. @param stype: The string representing the type to return @return: The python type if available
[ "Get", "the", "python", "type", "for", "a", "given", "string", "describtion", "for", "a", "type", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L347-L367
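A usage sketch, assuming the appconfig package above is installed (import path taken from appconfig/appconfig.py); _get_type is module-private, so this is for illustration only.

from appconfig.appconfig import _get_type

print(_get_type('int') is int)    # True
print(_get_type('BOOL') is bool)  # True: the lookup lower-cases its argument
print(_get_type('unicode'))       # str on Python 3, unicode on Python 2
try:
    _get_type('list')
except Exception as exc:          # AppConfigValueException for unsupported names
    print(exc)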
245,245
mweb/appconfig
appconfig/appconfig.py
_format_message
def _format_message(value, line_length, indent="", first_indent=None): ''' Return a string with newlines so that the given string fits into this line length. At the start of the line the indent is added. This can be used for commenting the message out within a file or to indent your text. All \\t will be replaced with 4 spaces. @param value: The string to get as a commented multiline comment. @param line_length: The length of the line to fill. @param indent: The indent to use for printing or charcter to put in front @param first_indent: The first indent might be shorter. If None then the first line uses the normal indent as the rest of the string. @return: The string with newlines ''' if indent.find('\t'): indent = indent.replace('\t', ' ') result = [] if first_indent is None: first_indent = indent cindent = first_indent tmp = "*" * line_length for ele in value.split(' '): if ele.find('\t') >= 0: ele = ele.replace('\t', ' ') if (len(ele) + len(tmp)) >= line_length: result.append(tmp) tmp = '{0}{1}'.format(cindent, ele) cindent = indent else: tmp = "{0} {1}".format(tmp, ele) result.append(tmp) result = result[1:] return "\n".join(result)
python
def _format_message(value, line_length, indent="", first_indent=None): ''' Return a string with newlines so that the given string fits into this line length. At the start of the line the indent is added. This can be used for commenting the message out within a file or to indent your text. All \\t will be replaced with 4 spaces. @param value: The string to get as a commented multiline comment. @param line_length: The length of the line to fill. @param indent: The indent to use for printing or charcter to put in front @param first_indent: The first indent might be shorter. If None then the first line uses the normal indent as the rest of the string. @return: The string with newlines ''' if indent.find('\t'): indent = indent.replace('\t', ' ') result = [] if first_indent is None: first_indent = indent cindent = first_indent tmp = "*" * line_length for ele in value.split(' '): if ele.find('\t') >= 0: ele = ele.replace('\t', ' ') if (len(ele) + len(tmp)) >= line_length: result.append(tmp) tmp = '{0}{1}'.format(cindent, ele) cindent = indent else: tmp = "{0} {1}".format(tmp, ele) result.append(tmp) result = result[1:] return "\n".join(result)
[ "def", "_format_message", "(", "value", ",", "line_length", ",", "indent", "=", "\"\"", ",", "first_indent", "=", "None", ")", ":", "if", "indent", ".", "find", "(", "'\\t'", ")", ":", "indent", "=", "indent", ".", "replace", "(", "'\\t'", ",", "' '", ")", "result", "=", "[", "]", "if", "first_indent", "is", "None", ":", "first_indent", "=", "indent", "cindent", "=", "first_indent", "tmp", "=", "\"*\"", "*", "line_length", "for", "ele", "in", "value", ".", "split", "(", "' '", ")", ":", "if", "ele", ".", "find", "(", "'\\t'", ")", ">=", "0", ":", "ele", "=", "ele", ".", "replace", "(", "'\\t'", ",", "' '", ")", "if", "(", "len", "(", "ele", ")", "+", "len", "(", "tmp", ")", ")", ">=", "line_length", ":", "result", ".", "append", "(", "tmp", ")", "tmp", "=", "'{0}{1}'", ".", "format", "(", "cindent", ",", "ele", ")", "cindent", "=", "indent", "else", ":", "tmp", "=", "\"{0} {1}\"", ".", "format", "(", "tmp", ",", "ele", ")", "result", ".", "append", "(", "tmp", ")", "result", "=", "result", "[", "1", ":", "]", "return", "\"\\n\"", ".", "join", "(", "result", ")" ]
Return a string with newlines so that the given string fits into this line length. At the start of the line the indent is added. This can be used for commenting the message out within a file or to indent your text. All \\t will be replaced with 4 spaces. @param value: The string to get as a commented multiline comment. @param line_length: The length of the line to fill. @param indent: The indent to use for printing or charcter to put in front @param first_indent: The first indent might be shorter. If None then the first line uses the normal indent as the rest of the string. @return: The string with newlines
[ "Return", "a", "string", "with", "newlines", "so", "that", "the", "given", "string", "fits", "into", "this", "line", "length", ".", "At", "the", "start", "of", "the", "line", "the", "indent", "is", "added", ".", "This", "can", "be", "used", "for", "commenting", "the", "message", "out", "within", "a", "file", "or", "to", "indent", "your", "text", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L370-L407
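A usage sketch, assuming appconfig is installed; it wraps a longer message at 40 columns with a '# ' prefix on continuation lines (the exact line breaks depend on word lengths).

from appconfig.appconfig import _format_message

text = ("This value controls how often the cache is refreshed. "
        "Set it to 0 to disable the refresh entirely.")  # invented message
print(_format_message(text, 40, indent="# ", first_indent="## "))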
245,246
mweb/appconfig
appconfig/appconfig.py
AppConfig.init_default_config
def init_default_config(self, path): ''' Initialize the config object and load the default configuration. The path to the config file must be provided. The name of the application is read from the config file. The config file stores the description and the default values for all configurations including the application name. @param path: The path to the config config file. ''' if not (os.path.exists(path) and os.path.isfile(path)): raise AppConfigValueException('The given config config file does ' 'not exist. ({0})'.format(path)) cfl = open(path, 'r') data = json.load(cfl) cfl.close() for key in data.keys(): if 'application_name' == key: self.application_name = data[key].lower() continue if 'application_author' == key: self.application_author = data[key].lower() continue if 'application_version' == key: self.application_version = data[key].lower() continue self._add_section_default(key, data[key])
python
def init_default_config(self, path): ''' Initialize the config object and load the default configuration. The path to the config file must be provided. The name of the application is read from the config file. The config file stores the description and the default values for all configurations including the application name. @param path: The path to the config config file. ''' if not (os.path.exists(path) and os.path.isfile(path)): raise AppConfigValueException('The given config config file does ' 'not exist. ({0})'.format(path)) cfl = open(path, 'r') data = json.load(cfl) cfl.close() for key in data.keys(): if 'application_name' == key: self.application_name = data[key].lower() continue if 'application_author' == key: self.application_author = data[key].lower() continue if 'application_version' == key: self.application_version = data[key].lower() continue self._add_section_default(key, data[key])
[ "def", "init_default_config", "(", "self", ",", "path", ")", ":", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "os", ".", "path", ".", "isfile", "(", "path", ")", ")", ":", "raise", "AppConfigValueException", "(", "'The given config config file does '", "'not exist. ({0})'", ".", "format", "(", "path", ")", ")", "cfl", "=", "open", "(", "path", ",", "'r'", ")", "data", "=", "json", ".", "load", "(", "cfl", ")", "cfl", ".", "close", "(", ")", "for", "key", "in", "data", ".", "keys", "(", ")", ":", "if", "'application_name'", "==", "key", ":", "self", ".", "application_name", "=", "data", "[", "key", "]", ".", "lower", "(", ")", "continue", "if", "'application_author'", "==", "key", ":", "self", ".", "application_author", "=", "data", "[", "key", "]", ".", "lower", "(", ")", "continue", "if", "'application_version'", "==", "key", ":", "self", ".", "application_version", "=", "data", "[", "key", "]", ".", "lower", "(", ")", "continue", "self", ".", "_add_section_default", "(", "key", ",", "data", "[", "key", "]", ")" ]
Initialize the config object and load the default configuration. The path to the config file must be provided. The name of the application is read from the config file. The config file stores the description and the default values for all configurations including the application name. @param path: The path to the config config file.
[ "Initialize", "the", "config", "object", "and", "load", "the", "default", "configuration", ".", "The", "path", "to", "the", "config", "file", "must", "be", "provided", ".", "The", "name", "of", "the", "application", "is", "read", "from", "the", "config", "file", ".", "The", "config", "file", "stores", "the", "description", "and", "the", "default", "values", "for", "all", "configurations", "including", "the", "application", "name", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L77-L102
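A minimal bootstrap sketch for `init_default_config`, assuming the import path above and a no-argument `AppConfig()` constructor. Only the three `application_*` keys are written to the defaults file, because the schema that `_add_section_default` expects for other top-level keys is not shown here.

```python
# Minimal sketch: create a defaults JSON file and load it.  The import path and
# the no-argument AppConfig() constructor are assumptions; section defaults are
# omitted because the schema _add_section_default expects is not shown above.
import json
import tempfile

from appconfig.appconfig import AppConfig

defaults = {
    "application_name": "demoapp",
    "application_author": "demoauthor",
    "application_version": "1.0",
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as handle:
    json.dump(defaults, handle)
    defaults_path = handle.name

cfg = AppConfig()
cfg.init_default_config(defaults_path)
print(cfg.application_name)   # -> "demoapp"
```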
245,247
mweb/appconfig
appconfig/appconfig.py
AppConfig.get_description
def get_description(self, section, key): ''' Get the description of a config key. If it does not exist an Exception will be thrown. @param section: the section where the key is stored. @param key: the key to get the description for. @return: A tuple with three elements (description, type, default) ''' if section in self.config_description: if key in self.config_description[section]: desc, value_type, default = \ self.config_description[section][key] return (desc, value_type, default) else: if self.has_option(section, key): # return an empty string since it is possible that a # section is not initialized, this happens if a plugin # that has some config values but is not initialized. return "", str, "" else: raise AppConfigValueException('Key ({0}) does not exist ' 'in section: {1}'.format(key, section)) else: if self.has_section(section): # return an empty string since it is possible that a section # is not initialized, this happens if a plugin that has some # config values but is not initialized. return "", str, "" else: raise AppConfigValueException('Section does not exist ' '[{0}]'.format(section))
python
def get_description(self, section, key): ''' Get the description of a config key. If it does not exist an Exception will be thrown. @param section: the section where the key is stored. @param key: the key to get the description for. @return: A tuple with three elements (description, type, default) ''' if section in self.config_description: if key in self.config_description[section]: desc, value_type, default = \ self.config_description[section][key] return (desc, value_type, default) else: if self.has_option(section, key): # return an empty string since it is possible that a # section is not initialized, this happens if a plugin # that has some config values but is not initialized. return "", str, "" else: raise AppConfigValueException('Key ({0}) does not exist ' 'in section: {1}'.format(key, section)) else: if self.has_section(section): # return an empty string since it is possible that a section # is not initialized, this happens if a plugin that has some # config values but is not initialized. return "", str, "" else: raise AppConfigValueException('Section does not exist ' '[{0}]'.format(section))
[ "def", "get_description", "(", "self", ",", "section", ",", "key", ")", ":", "if", "section", "in", "self", ".", "config_description", ":", "if", "key", "in", "self", ".", "config_description", "[", "section", "]", ":", "desc", ",", "value_type", ",", "default", "=", "self", ".", "config_description", "[", "section", "]", "[", "key", "]", "return", "(", "desc", ",", "value_type", ",", "default", ")", "else", ":", "if", "self", ".", "has_option", "(", "section", ",", "key", ")", ":", "# return an empty string since it is possible that a", "# section is not initialized, this happens if a plugin", "# that has some config values but is not initialized.", "return", "\"\"", ",", "str", ",", "\"\"", "else", ":", "raise", "AppConfigValueException", "(", "'Key ({0}) does not exist '", "'in section: {1}'", ".", "format", "(", "key", ",", "section", ")", ")", "else", ":", "if", "self", ".", "has_section", "(", "section", ")", ":", "# return an empty string since it is possible that a section", "# is not initialized, this happens if a plugin that has some", "# config values but is not initialized.", "return", "\"\"", ",", "str", ",", "\"\"", "else", ":", "raise", "AppConfigValueException", "(", "'Section does not exist '", "'[{0}]'", ".", "format", "(", "section", ")", ")" ]
Get the description of a config key. If it does not exist an Exception will be thrown. @param section: the section where the key is stored. @param key: the key to get the description for. @return: A tuple with three elements (description, type, default)
[ "Get", "the", "description", "of", "a", "config", "key", ".", "If", "it", "does", "not", "exist", "an", "Exception", "will", "be", "thrown", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L104-L134
245,248
mweb/appconfig
appconfig/appconfig.py
AppConfig.load_default
def load_default(self): ''' Load the default config files. First the global config file then the user config file. ''' appdir = AppDirs(self.application_name, self.application_author, version=self.application_version) file_name = os.path.join(appdir.site_data_dir, "{0}.conf".format( self.application_name.lower())) if os.path.exists(file_name): self.load(file_name) if os.name is not 'posix' or os.getuid() > 0: config_file = os.path.join(appdir.user_data_dir, '{0}.conf'.format( self.application_name.lower())) if os.path.exists(config_file): self.load(config_file)
python
def load_default(self): ''' Load the default config files. First the global config file then the user config file. ''' appdir = AppDirs(self.application_name, self.application_author, version=self.application_version) file_name = os.path.join(appdir.site_data_dir, "{0}.conf".format( self.application_name.lower())) if os.path.exists(file_name): self.load(file_name) if os.name is not 'posix' or os.getuid() > 0: config_file = os.path.join(appdir.user_data_dir, '{0}.conf'.format( self.application_name.lower())) if os.path.exists(config_file): self.load(config_file)
[ "def", "load_default", "(", "self", ")", ":", "appdir", "=", "AppDirs", "(", "self", ".", "application_name", ",", "self", ".", "application_author", ",", "version", "=", "self", ".", "application_version", ")", "file_name", "=", "os", ".", "path", ".", "join", "(", "appdir", ".", "site_data_dir", ",", "\"{0}.conf\"", ".", "format", "(", "self", ".", "application_name", ".", "lower", "(", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "file_name", ")", ":", "self", ".", "load", "(", "file_name", ")", "if", "os", ".", "name", "is", "not", "'posix'", "or", "os", ".", "getuid", "(", ")", ">", "0", ":", "config_file", "=", "os", ".", "path", ".", "join", "(", "appdir", ".", "user_data_dir", ",", "'{0}.conf'", ".", "format", "(", "self", ".", "application_name", ".", "lower", "(", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "config_file", ")", ":", "self", ".", "load", "(", "config_file", ")" ]
Load the default config files. First the global config file then the user config file.
[ "Load", "the", "default", "config", "files", ".", "First", "the", "global", "config", "file", "then", "the", "user", "config", "file", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L136-L150
245,249
mweb/appconfig
appconfig/appconfig.py
AppConfig.load
def load(self, filename): ''' Load the given config file. @param filename: the filename including the path to load. ''' if not os.path.exists(filename): #print 'Could not load config file [%s]' % (filename) raise AppConfigValueException('Could not load config file {0}'. format(filename)) cfl = open(filename, 'r') if PY2: self.readfp(cfl) else: self.read_file(cfl) cfl.close()
python
def load(self, filename): ''' Load the given config file. @param filename: the filename including the path to load. ''' if not os.path.exists(filename): #print 'Could not load config file [%s]' % (filename) raise AppConfigValueException('Could not load config file {0}'. format(filename)) cfl = open(filename, 'r') if PY2: self.readfp(cfl) else: self.read_file(cfl) cfl.close()
[ "def", "load", "(", "self", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "#print 'Could not load config file [%s]' % (filename)", "raise", "AppConfigValueException", "(", "'Could not load config file {0}'", ".", "format", "(", "filename", ")", ")", "cfl", "=", "open", "(", "filename", ",", "'r'", ")", "if", "PY2", ":", "self", ".", "readfp", "(", "cfl", ")", "else", ":", "self", ".", "read_file", "(", "cfl", ")", "cfl", ".", "close", "(", ")" ]
Load the given config file. @param filename: the filename including the path to load.
[ "Load", "the", "given", "config", "file", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L152-L166
245,250
mweb/appconfig
appconfig/appconfig.py
AppConfig.get
def get(self, section, key): ''' Get the value of a key in the given section. It will automatically translate the paramter type if the parameter has a type specified with the description. @param section: the section where the key can be found. @param key: the key the value is stored under. @return the value as a string or the specified type. @exception: if the type is specified and the value could not be translated to the given type. ''' section = section.lower() key = key.lower() descr, value_type, default = self.get_description(section, key) value = ConfigParser.get(self, section, key) if value_type == bool: if PY2: if value.lower() not in self._boolean_states: raise AppConfigValueException('Not a boolean: {0}'. format(value)) return self._boolean_states[value.lower()] else: try: return self._convert_to_boolean(value) except ValueError: raise AppConfigValueException('Not a boolean: {0}'. format(value)) return value_type(value)
python
def get(self, section, key): ''' Get the value of a key in the given section. It will automatically translate the paramter type if the parameter has a type specified with the description. @param section: the section where the key can be found. @param key: the key the value is stored under. @return the value as a string or the specified type. @exception: if the type is specified and the value could not be translated to the given type. ''' section = section.lower() key = key.lower() descr, value_type, default = self.get_description(section, key) value = ConfigParser.get(self, section, key) if value_type == bool: if PY2: if value.lower() not in self._boolean_states: raise AppConfigValueException('Not a boolean: {0}'. format(value)) return self._boolean_states[value.lower()] else: try: return self._convert_to_boolean(value) except ValueError: raise AppConfigValueException('Not a boolean: {0}'. format(value)) return value_type(value)
[ "def", "get", "(", "self", ",", "section", ",", "key", ")", ":", "section", "=", "section", ".", "lower", "(", ")", "key", "=", "key", ".", "lower", "(", ")", "descr", ",", "value_type", ",", "default", "=", "self", ".", "get_description", "(", "section", ",", "key", ")", "value", "=", "ConfigParser", ".", "get", "(", "self", ",", "section", ",", "key", ")", "if", "value_type", "==", "bool", ":", "if", "PY2", ":", "if", "value", ".", "lower", "(", ")", "not", "in", "self", ".", "_boolean_states", ":", "raise", "AppConfigValueException", "(", "'Not a boolean: {0}'", ".", "format", "(", "value", ")", ")", "return", "self", ".", "_boolean_states", "[", "value", ".", "lower", "(", ")", "]", "else", ":", "try", ":", "return", "self", ".", "_convert_to_boolean", "(", "value", ")", "except", "ValueError", ":", "raise", "AppConfigValueException", "(", "'Not a boolean: {0}'", ".", "format", "(", "value", ")", ")", "return", "value_type", "(", "value", ")" ]
Get the value of a key in the given section. It will automatically translate the parameter type if the parameter has a type specified with the description. @param section: the section where the key can be found. @param key: the key the value is stored under. @return the value as a string or the specified type. @exception: if the type is specified and the value could not be translated to the given type.
[ "Get", "the", "value", "of", "a", "key", "in", "the", "given", "section", ".", "It", "will", "automatically", "translate", "the", "paramter", "type", "if", "the", "parameter", "has", "a", "type", "specified", "with", "the", "description", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L168-L196
245,251
mweb/appconfig
appconfig/appconfig.py
AppConfig.set
def set(self, section, key, value): ''' Set the value for a key in the given section. It will check the type of the value if it is available. If the value is not from the given type it will be transformed to the type. An exception will be thrown if there is a problem with the conversion. @param section: the section of the key @param key: the key where to store the valu @param value: the value to store @exception: If there is a problem with the conversation of the value type. ''' value_type = str if self.has_option(section, key): descr, value_type, default = self.get_description(section, key) if value_type != type(value): if value_type == bool: if ((type(value) in string_types and value.lower() in ('true', 't')) or (type(value) == int and value > 0)): value = True elif ((type(value) in string_types and value.lower() in ('false', 'f')) or (type(value) == int and value == 0)): value = False else: raise AppConfigValueException('Could not convert ' 'boolean type: {0}'.format(value)) else: value = value_type(value) if not self.has_section(section): self.add_section(section) ConfigParser.set(self, section, key, str(value))
python
def set(self, section, key, value): ''' Set the value for a key in the given section. It will check the type of the value if it is available. If the value is not from the given type it will be transformed to the type. An exception will be thrown if there is a problem with the conversion. @param section: the section of the key @param key: the key where to store the valu @param value: the value to store @exception: If there is a problem with the conversation of the value type. ''' value_type = str if self.has_option(section, key): descr, value_type, default = self.get_description(section, key) if value_type != type(value): if value_type == bool: if ((type(value) in string_types and value.lower() in ('true', 't')) or (type(value) == int and value > 0)): value = True elif ((type(value) in string_types and value.lower() in ('false', 'f')) or (type(value) == int and value == 0)): value = False else: raise AppConfigValueException('Could not convert ' 'boolean type: {0}'.format(value)) else: value = value_type(value) if not self.has_section(section): self.add_section(section) ConfigParser.set(self, section, key, str(value))
[ "def", "set", "(", "self", ",", "section", ",", "key", ",", "value", ")", ":", "value_type", "=", "str", "if", "self", ".", "has_option", "(", "section", ",", "key", ")", ":", "descr", ",", "value_type", ",", "default", "=", "self", ".", "get_description", "(", "section", ",", "key", ")", "if", "value_type", "!=", "type", "(", "value", ")", ":", "if", "value_type", "==", "bool", ":", "if", "(", "(", "type", "(", "value", ")", "in", "string_types", "and", "value", ".", "lower", "(", ")", "in", "(", "'true'", ",", "'t'", ")", ")", "or", "(", "type", "(", "value", ")", "==", "int", "and", "value", ">", "0", ")", ")", ":", "value", "=", "True", "elif", "(", "(", "type", "(", "value", ")", "in", "string_types", "and", "value", ".", "lower", "(", ")", "in", "(", "'false'", ",", "'f'", ")", ")", "or", "(", "type", "(", "value", ")", "==", "int", "and", "value", "==", "0", ")", ")", ":", "value", "=", "False", "else", ":", "raise", "AppConfigValueException", "(", "'Could not convert '", "'boolean type: {0}'", ".", "format", "(", "value", ")", ")", "else", ":", "value", "=", "value_type", "(", "value", ")", "if", "not", "self", ".", "has_section", "(", "section", ")", ":", "self", ".", "add_section", "(", "section", ")", "ConfigParser", ".", "set", "(", "self", ",", "section", ",", "key", ",", "str", "(", "value", ")", ")" ]
Set the value for a key in the given section. It will check the type of the value if it is available. If the value is not from the given type it will be transformed to the type. An exception will be thrown if there is a problem with the conversion. @param section: the section of the key @param key: the key where to store the value @param value: the value to store @exception: If there is a problem with the conversion of the value type.
[ "Set", "the", "value", "for", "a", "key", "in", "the", "given", "section", ".", "It", "will", "check", "the", "type", "of", "the", "value", "if", "it", "is", "available", ".", "If", "the", "value", "is", "not", "from", "the", "given", "type", "it", "will", "be", "transformed", "to", "the", "type", ".", "An", "exception", "will", "be", "thrown", "if", "there", "is", "a", "problem", "with", "the", "conversion", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L198-L234
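A hedged round-trip sketch for `set` and `get`, under the same assumptions as above (import path, bare `AppConfig()` constructor): with no key descriptions registered, every value is stored and returned as a string; typed reads only happen for keys declared in the defaults file.

```python
# Round trip through set()/get() for keys that have no registered description.
# The import path and the bare AppConfig() constructor are assumptions.
from appconfig.appconfig import AppConfig

cfg = AppConfig()
cfg.set("server", "port", 8080)     # the int is coerced and stored as "8080"
cfg.set("server", "debug", "true")

print(cfg.get("server", "port"))    # -> "8080"; no type declared, so str
print(cfg.get("server", "debug"))   # -> "true"

# If the defaults file declares a key with a bool or int type, get() converts
# the stored text back to that type before returning it.
```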
245,252
mweb/appconfig
appconfig/appconfig.py
AppConfig.save
def save(self, filename=None, verbose=False): ''' Save the config to the given file or to given default location. @param filename: the file to write the config @param verbose: If set to true the config file will have all values and all descriptions ''' if filename is None: if (not self.has_section(self.application_name) or not self.has_option(self.application_name, 'config_file')): if not self.has_section(self.application_name): self.add_section(self.application_name) if not self.application_name in self.config_description: self.config_description[self.application_name] = {} appdir = AppDirs(self.application_name, self.application_author, version=self.application_version) value = os.path.join(appdir.user_data_dir, '{0}.conf'.format( self.application_name.lower())) if not self.has_option(self.application_name, 'config_file'): self.set(self.application_name, 'config_file', value) if not ('config_file' in self.config_description[self.application_name]): self.config_description[self.application_name]\ ['config_file'] = ('The config file to ' 'overwrite on change of the config values. ' '[$HOME/.{0}/{0}.conf]'.format( self.application_name), str, value) filename = self.get(self.application_name, 'config_file') if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) hidden = None if self.has_section('hidden'): hidden = self.items('hidden') self.remove_section('hidden') cfp = open(filename, 'w') self._write_config(cfp, verbose) cfp.close() if hidden is not None: for key, value in hidden: self.set('hidden', key, value)
python
def save(self, filename=None, verbose=False): ''' Save the config to the given file or to given default location. @param filename: the file to write the config @param verbose: If set to true the config file will have all values and all descriptions ''' if filename is None: if (not self.has_section(self.application_name) or not self.has_option(self.application_name, 'config_file')): if not self.has_section(self.application_name): self.add_section(self.application_name) if not self.application_name in self.config_description: self.config_description[self.application_name] = {} appdir = AppDirs(self.application_name, self.application_author, version=self.application_version) value = os.path.join(appdir.user_data_dir, '{0}.conf'.format( self.application_name.lower())) if not self.has_option(self.application_name, 'config_file'): self.set(self.application_name, 'config_file', value) if not ('config_file' in self.config_description[self.application_name]): self.config_description[self.application_name]\ ['config_file'] = ('The config file to ' 'overwrite on change of the config values. ' '[$HOME/.{0}/{0}.conf]'.format( self.application_name), str, value) filename = self.get(self.application_name, 'config_file') if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) hidden = None if self.has_section('hidden'): hidden = self.items('hidden') self.remove_section('hidden') cfp = open(filename, 'w') self._write_config(cfp, verbose) cfp.close() if hidden is not None: for key, value in hidden: self.set('hidden', key, value)
[ "def", "save", "(", "self", ",", "filename", "=", "None", ",", "verbose", "=", "False", ")", ":", "if", "filename", "is", "None", ":", "if", "(", "not", "self", ".", "has_section", "(", "self", ".", "application_name", ")", "or", "not", "self", ".", "has_option", "(", "self", ".", "application_name", ",", "'config_file'", ")", ")", ":", "if", "not", "self", ".", "has_section", "(", "self", ".", "application_name", ")", ":", "self", ".", "add_section", "(", "self", ".", "application_name", ")", "if", "not", "self", ".", "application_name", "in", "self", ".", "config_description", ":", "self", ".", "config_description", "[", "self", ".", "application_name", "]", "=", "{", "}", "appdir", "=", "AppDirs", "(", "self", ".", "application_name", ",", "self", ".", "application_author", ",", "version", "=", "self", ".", "application_version", ")", "value", "=", "os", ".", "path", ".", "join", "(", "appdir", ".", "user_data_dir", ",", "'{0}.conf'", ".", "format", "(", "self", ".", "application_name", ".", "lower", "(", ")", ")", ")", "if", "not", "self", ".", "has_option", "(", "self", ".", "application_name", ",", "'config_file'", ")", ":", "self", ".", "set", "(", "self", ".", "application_name", ",", "'config_file'", ",", "value", ")", "if", "not", "(", "'config_file'", "in", "self", ".", "config_description", "[", "self", ".", "application_name", "]", ")", ":", "self", ".", "config_description", "[", "self", ".", "application_name", "]", "[", "'config_file'", "]", "=", "(", "'The config file to '", "'overwrite on change of the config values. '", "'[$HOME/.{0}/{0}.conf]'", ".", "format", "(", "self", ".", "application_name", ")", ",", "str", ",", "value", ")", "filename", "=", "self", ".", "get", "(", "self", ".", "application_name", ",", "'config_file'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "hidden", "=", "None", "if", "self", ".", "has_section", "(", "'hidden'", ")", ":", "hidden", "=", "self", ".", "items", "(", "'hidden'", ")", "self", ".", "remove_section", "(", "'hidden'", ")", "cfp", "=", "open", "(", "filename", ",", "'w'", ")", "self", ".", "_write_config", "(", "cfp", ",", "verbose", ")", "cfp", ".", "close", "(", ")", "if", "hidden", "is", "not", "None", ":", "for", "key", ",", "value", "in", "hidden", ":", "self", ".", "set", "(", "'hidden'", ",", "key", ",", "value", ")" ]
Save the config to the given file or to given default location. @param filename: the file to write the config @param verbose: If set to true the config file will have all values and all descriptions
[ "Save", "the", "config", "to", "the", "given", "file", "or", "to", "given", "default", "location", "." ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L236-L282
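A short sketch of `save` with an explicit filename, which skips the appdirs-based default location; `verbose=True` also writes each key's description and default value as commented-out lines. The import path and constructor are again assumptions.

```python
# Writing the config out explicitly; import path and constructor are assumed.
from appconfig.appconfig import AppConfig

cfg = AppConfig()
cfg.set("server", "port", 8080)

# An explicit path bypasses the appdirs lookup; verbose=True adds the key
# descriptions and defaults as comments above each value.
cfg.save("/tmp/demoapp.conf", verbose=True)
```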
245,253
mweb/appconfig
appconfig/appconfig.py
AppConfig._write_config
def _write_config(self, filedesc, verbose=False): ''' Write the current config to the given filedescriptor which has must be opened for writing. Only the config values different from the default value are written If the verbose switch is turned on the config file generated will have all values including the default values shown but they will be commented out. In addition the description of each paramters is stored before the value. @param filedesc: The file to write the config into @param verbose: The switch between the minimalistic and the more verbose config file. ''' desc = [] for section in self.sections(): if section.lower() in ('', 'hidden'): continue desc.append("") desc.append('[{0}]'.format(section)) for key in self.options(section): descr, value_type, default = self.get_description(section, key) if verbose: desc.append(_format_message(descr, 78, "# ")) desc.append("# Type: [{0}]".format(str(value_type))) desc.append("# {0}={1}".format(key, default)) if not self.get(section, key) == default: desc.append('{0}={1}'.format(key, self.get(section, key))) if verbose: desc.append("") filedesc.write("{0}\n".format("\n".join(desc[1:])))
python
def _write_config(self, filedesc, verbose=False): ''' Write the current config to the given filedescriptor which has must be opened for writing. Only the config values different from the default value are written If the verbose switch is turned on the config file generated will have all values including the default values shown but they will be commented out. In addition the description of each paramters is stored before the value. @param filedesc: The file to write the config into @param verbose: The switch between the minimalistic and the more verbose config file. ''' desc = [] for section in self.sections(): if section.lower() in ('', 'hidden'): continue desc.append("") desc.append('[{0}]'.format(section)) for key in self.options(section): descr, value_type, default = self.get_description(section, key) if verbose: desc.append(_format_message(descr, 78, "# ")) desc.append("# Type: [{0}]".format(str(value_type))) desc.append("# {0}={1}".format(key, default)) if not self.get(section, key) == default: desc.append('{0}={1}'.format(key, self.get(section, key))) if verbose: desc.append("") filedesc.write("{0}\n".format("\n".join(desc[1:])))
[ "def", "_write_config", "(", "self", ",", "filedesc", ",", "verbose", "=", "False", ")", ":", "desc", "=", "[", "]", "for", "section", "in", "self", ".", "sections", "(", ")", ":", "if", "section", ".", "lower", "(", ")", "in", "(", "''", ",", "'hidden'", ")", ":", "continue", "desc", ".", "append", "(", "\"\"", ")", "desc", ".", "append", "(", "'[{0}]'", ".", "format", "(", "section", ")", ")", "for", "key", "in", "self", ".", "options", "(", "section", ")", ":", "descr", ",", "value_type", ",", "default", "=", "self", ".", "get_description", "(", "section", ",", "key", ")", "if", "verbose", ":", "desc", ".", "append", "(", "_format_message", "(", "descr", ",", "78", ",", "\"# \"", ")", ")", "desc", ".", "append", "(", "\"# Type: [{0}]\"", ".", "format", "(", "str", "(", "value_type", ")", ")", ")", "desc", ".", "append", "(", "\"# {0}={1}\"", ".", "format", "(", "key", ",", "default", ")", ")", "if", "not", "self", ".", "get", "(", "section", ",", "key", ")", "==", "default", ":", "desc", ".", "append", "(", "'{0}={1}'", ".", "format", "(", "key", ",", "self", ".", "get", "(", "section", ",", "key", ")", ")", ")", "if", "verbose", ":", "desc", ".", "append", "(", "\"\"", ")", "filedesc", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "\"\\n\"", ".", "join", "(", "desc", "[", "1", ":", "]", ")", ")", ")" ]
Write the current config to the given file descriptor, which must be opened for writing. Only the config values different from the default value are written. If the verbose switch is turned on the config file generated will have all values including the default values shown but they will be commented out. In addition the description of each parameter is stored before the value. @param filedesc: The file to write the config into @param verbose: The switch between the minimalistic and the more verbose config file.
[ "Write", "the", "current", "config", "to", "the", "given", "filedescriptor", "which", "has", "must", "be", "opened", "for", "writing", ".", "Only", "the", "config", "values", "different", "from", "the", "default", "value", "are", "written" ]
780c1fe3b2f537463a46e335186b7741add88a1e
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L284-L316
245,254
sassoo/goldman
goldman/serializers/json_7159.py
Serializer.serialize
def serialize(self, data): """ Call json.dumps & let it rip """ super(Serializer, self).serialize(data) self.resp.body = json.dumps(data)
python
def serialize(self, data): """ Call json.dumps & let it rip """ super(Serializer, self).serialize(data) self.resp.body = json.dumps(data)
[ "def", "serialize", "(", "self", ",", "data", ")", ":", "super", "(", "Serializer", ",", "self", ")", ".", "serialize", "(", "data", ")", "self", ".", "resp", ".", "body", "=", "json", ".", "dumps", "(", "data", ")" ]
Call json.dumps & let it rip
[ "Call", "json", ".", "dumps", "&", "let", "it", "rip" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/json_7159.py#L21-L25
245,255
evfredericksen/pynacea
pynhost/pynhost/ruleparser.py
surround_previous_word
def surround_previous_word(input_str): ''' Surround last word in string with parentheses. If last non-whitespace character is delimiter, do nothing ''' start = None end = None for i, char in enumerate(reversed(input_str)): if start is None: if char in '{}()[]<>?|': return input_str elif char != ' ': start = i else: if char in '{}()[]<>?| ': end = i break if start is None: return input_str if end is None: end = len(input_str) new_str = '' for i, char in enumerate(reversed(input_str)): if char == ' ' and i + 1 == start: continue if i == start: new_str += ') ' elif i == end: new_str += '(' new_str += char if end == len(input_str): new_str += '(' return new_str[::-1]
python
def surround_previous_word(input_str): ''' Surround last word in string with parentheses. If last non-whitespace character is delimiter, do nothing ''' start = None end = None for i, char in enumerate(reversed(input_str)): if start is None: if char in '{}()[]<>?|': return input_str elif char != ' ': start = i else: if char in '{}()[]<>?| ': end = i break if start is None: return input_str if end is None: end = len(input_str) new_str = '' for i, char in enumerate(reversed(input_str)): if char == ' ' and i + 1 == start: continue if i == start: new_str += ') ' elif i == end: new_str += '(' new_str += char if end == len(input_str): new_str += '(' return new_str[::-1]
[ "def", "surround_previous_word", "(", "input_str", ")", ":", "start", "=", "None", "end", "=", "None", "for", "i", ",", "char", "in", "enumerate", "(", "reversed", "(", "input_str", ")", ")", ":", "if", "start", "is", "None", ":", "if", "char", "in", "'{}()[]<>?|'", ":", "return", "input_str", "elif", "char", "!=", "' '", ":", "start", "=", "i", "else", ":", "if", "char", "in", "'{}()[]<>?| '", ":", "end", "=", "i", "break", "if", "start", "is", "None", ":", "return", "input_str", "if", "end", "is", "None", ":", "end", "=", "len", "(", "input_str", ")", "new_str", "=", "''", "for", "i", ",", "char", "in", "enumerate", "(", "reversed", "(", "input_str", ")", ")", ":", "if", "char", "==", "' '", "and", "i", "+", "1", "==", "start", ":", "continue", "if", "i", "==", "start", ":", "new_str", "+=", "') '", "elif", "i", "==", "end", ":", "new_str", "+=", "'('", "new_str", "+=", "char", "if", "end", "==", "len", "(", "input_str", ")", ":", "new_str", "+=", "'('", "return", "new_str", "[", ":", ":", "-", "1", "]" ]
Surround last word in string with parentheses. If last non-whitespace character is delimiter, do nothing
[ "Surround", "last", "word", "in", "string", "with", "parentheses", ".", "If", "last", "non", "-", "whitespace", "character", "is", "delimiter", "do", "nothing" ]
63ee0e6695209048bf2571aa2c3770f502e29b0a
https://github.com/evfredericksen/pynacea/blob/63ee0e6695209048bf2571aa2c3770f502e29b0a/pynhost/pynhost/ruleparser.py#L118-L150
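Two hedged calls to `surround_previous_word` illustrating the behaviour described in its docstring; the import path is assumed from the repository layout, and the exact spacing of the wrapped output is left to the function.

```python
# Illustration only; the import path is assumed from pynhost/pynhost/ruleparser.py.
from pynhost.ruleparser import surround_previous_word

print(surround_previous_word("hello world"))   # last word is wrapped in parentheses
print(surround_previous_word("number <0-9>"))  # ends with a delimiter -> returned unchanged
```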
245,256
anoroozian/pyvt
pyvt/__init__.py
API._limit_call_handler
def _limit_call_handler(self): """ Ensure we don't exceed the N requests a minute limit by leveraging a thread lock """ # acquire a lock on our threading.Lock() object with self.limit_lock: # if we have no configured limit, exit. the lock releases based on scope if self.limit_per_min <= 0: return now = time.time() # self.limits is a list of query times + 60 seconds. In essence it is a list of times # that queries time out of the 60 second query window. # this check expires any limits that have passed self.limits = [l for l in self.limits if l > now] # and we tack on the current query self.limits.append(now + 60) # if we have more than our limit of queries (and remember, we call this before we actually # execute a query) we sleep until the oldest query on the list (element 0 because we append # new queries) times out. We don't worry about cleanup because next time this routine runs # it will clean itself up. if len(self.limits) >= self.limit_per_min: time.sleep(self.limits[0] - now)
python
def _limit_call_handler(self): """ Ensure we don't exceed the N requests a minute limit by leveraging a thread lock """ # acquire a lock on our threading.Lock() object with self.limit_lock: # if we have no configured limit, exit. the lock releases based on scope if self.limit_per_min <= 0: return now = time.time() # self.limits is a list of query times + 60 seconds. In essence it is a list of times # that queries time out of the 60 second query window. # this check expires any limits that have passed self.limits = [l for l in self.limits if l > now] # and we tack on the current query self.limits.append(now + 60) # if we have more than our limit of queries (and remember, we call this before we actually # execute a query) we sleep until the oldest query on the list (element 0 because we append # new queries) times out. We don't worry about cleanup because next time this routine runs # it will clean itself up. if len(self.limits) >= self.limit_per_min: time.sleep(self.limits[0] - now)
[ "def", "_limit_call_handler", "(", "self", ")", ":", "# acquire a lock on our threading.Lock() object", "with", "self", ".", "limit_lock", ":", "# if we have no configured limit, exit. the lock releases based on scope", "if", "self", ".", "limit_per_min", "<=", "0", ":", "return", "now", "=", "time", ".", "time", "(", ")", "# self.limits is a list of query times + 60 seconds. In essence it is a list of times", "# that queries time out of the 60 second query window.", "# this check expires any limits that have passed", "self", ".", "limits", "=", "[", "l", "for", "l", "in", "self", ".", "limits", "if", "l", ">", "now", "]", "# and we tack on the current query", "self", ".", "limits", ".", "append", "(", "now", "+", "60", ")", "# if we have more than our limit of queries (and remember, we call this before we actually", "# execute a query) we sleep until the oldest query on the list (element 0 because we append", "# new queries) times out. We don't worry about cleanup because next time this routine runs", "# it will clean itself up.", "if", "len", "(", "self", ".", "limits", ")", ">=", "self", ".", "limit_per_min", ":", "time", ".", "sleep", "(", "self", ".", "limits", "[", "0", "]", "-", "now", ")" ]
Ensure we don't exceed the N requests a minute limit by leveraging a thread lock
[ "Ensure", "we", "don", "t", "exceed", "the", "N", "requests", "a", "minute", "limit", "by", "leveraging", "a", "thread", "lock" ]
bf36f833f1f1b7d93169fd9ad451e06b7d46afc1
https://github.com/anoroozian/pyvt/blob/bf36f833f1f1b7d93169fd9ad451e06b7d46afc1/pyvt/__init__.py#L55-L79
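The sliding-window idea in `_limit_call_handler` can be sketched as a standalone class; the code below is an illustration of the technique, not the pyvt implementation itself, and the class and method names are invented for the example.

```python
# Standalone sketch of the sliding-window throttle used above; names are
# invented for the illustration and this is not the pyvt class itself.
import threading
import time


class MinuteRateLimiter:
    def __init__(self, limit_per_min):
        self.limit_per_min = limit_per_min
        self.limits = []                  # expiry times of recent calls
        self.limit_lock = threading.Lock()

    def wait_for_slot(self):
        with self.limit_lock:
            if self.limit_per_min <= 0:
                return                    # throttling disabled
            now = time.time()
            # drop calls that have aged out of the 60 second window
            self.limits = [t for t in self.limits if t > now]
            self.limits.append(now + 60)
            if len(self.limits) >= self.limit_per_min:
                # sleep until the oldest tracked call leaves the window
                time.sleep(self.limits[0] - now)


limiter = MinuteRateLimiter(limit_per_min=4)
for _ in range(3):
    limiter.wait_for_slot()               # blocks once the per-minute budget is spent
```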
245,257
lambdalisue/django-roughpages
src/roughpages/views.py
roughpage
def roughpage(request, url): """ Public interface to the rough page view. """ if settings.APPEND_SLASH and not url.endswith('/'): # redirect to the url which have end slash return redirect(url + '/', permanent=True) # get base filename from url filename = url_to_filename(url) # try to find the template_filename with backends template_filenames = get_backend().prepare_filenames(filename, request=request) # add extra prefix path root = settings.ROUGHPAGES_TEMPLATE_DIR template_filenames = [os.path.join(root, x) for x in template_filenames] try: t = loader.select_template(template_filenames) return render_roughpage(request, t) except TemplateDoesNotExist: if settings.ROUGHPAGES_RAISE_TEMPLATE_DOES_NOT_EXISTS: raise raise Http404
python
def roughpage(request, url): """ Public interface to the rough page view. """ if settings.APPEND_SLASH and not url.endswith('/'): # redirect to the url which have end slash return redirect(url + '/', permanent=True) # get base filename from url filename = url_to_filename(url) # try to find the template_filename with backends template_filenames = get_backend().prepare_filenames(filename, request=request) # add extra prefix path root = settings.ROUGHPAGES_TEMPLATE_DIR template_filenames = [os.path.join(root, x) for x in template_filenames] try: t = loader.select_template(template_filenames) return render_roughpage(request, t) except TemplateDoesNotExist: if settings.ROUGHPAGES_RAISE_TEMPLATE_DOES_NOT_EXISTS: raise raise Http404
[ "def", "roughpage", "(", "request", ",", "url", ")", ":", "if", "settings", ".", "APPEND_SLASH", "and", "not", "url", ".", "endswith", "(", "'/'", ")", ":", "# redirect to the url which have end slash", "return", "redirect", "(", "url", "+", "'/'", ",", "permanent", "=", "True", ")", "# get base filename from url", "filename", "=", "url_to_filename", "(", "url", ")", "# try to find the template_filename with backends", "template_filenames", "=", "get_backend", "(", ")", ".", "prepare_filenames", "(", "filename", ",", "request", "=", "request", ")", "# add extra prefix path", "root", "=", "settings", ".", "ROUGHPAGES_TEMPLATE_DIR", "template_filenames", "=", "[", "os", ".", "path", ".", "join", "(", "root", ",", "x", ")", "for", "x", "in", "template_filenames", "]", "try", ":", "t", "=", "loader", ".", "select_template", "(", "template_filenames", ")", "return", "render_roughpage", "(", "request", ",", "t", ")", "except", "TemplateDoesNotExist", ":", "if", "settings", ".", "ROUGHPAGES_RAISE_TEMPLATE_DOES_NOT_EXISTS", ":", "raise", "raise", "Http404" ]
Public interface to the rough page view.
[ "Public", "interface", "to", "the", "rough", "page", "view", "." ]
f6a2724ece729c5deced2c2546d172561ef785ec
https://github.com/lambdalisue/django-roughpages/blob/f6a2724ece729c5deced2c2546d172561ef785ec/src/roughpages/views.py#L25-L46
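One plausible way to route requests to the `roughpage` view from a project's `urls.py`; the catch-all pattern and its placement are assumptions for illustration, not wiring taken from the package's documentation.

```python
# Hypothetical URL wiring for the view above (Django < 4 style); the catch-all
# regex and its position at the end of urlpatterns are assumptions.
from django.conf.urls import url

from roughpages.views import roughpage

urlpatterns = [
    # ... project URLs would normally come first ...
    url(r'^(?P<url>.*)$', roughpage),   # hands the requested path to roughpage()
]
```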
245,258
lambdalisue/django-roughpages
src/roughpages/views.py
render_roughpage
def render_roughpage(request, t): """ Internal interface to the rough page view. """ import django if django.VERSION >= (1, 8): c = {} response = HttpResponse(t.render(c, request)) else: c = RequestContext(request) response = HttpResponse(t.render(c)) return response
python
def render_roughpage(request, t): """ Internal interface to the rough page view. """ import django if django.VERSION >= (1, 8): c = {} response = HttpResponse(t.render(c, request)) else: c = RequestContext(request) response = HttpResponse(t.render(c)) return response
[ "def", "render_roughpage", "(", "request", ",", "t", ")", ":", "import", "django", "if", "django", ".", "VERSION", ">=", "(", "1", ",", "8", ")", ":", "c", "=", "{", "}", "response", "=", "HttpResponse", "(", "t", ".", "render", "(", "c", ",", "request", ")", ")", "else", ":", "c", "=", "RequestContext", "(", "request", ")", "response", "=", "HttpResponse", "(", "t", ".", "render", "(", "c", ")", ")", "return", "response" ]
Internal interface to the rough page view.
[ "Internal", "interface", "to", "the", "rough", "page", "view", "." ]
f6a2724ece729c5deced2c2546d172561ef785ec
https://github.com/lambdalisue/django-roughpages/blob/f6a2724ece729c5deced2c2546d172561ef785ec/src/roughpages/views.py#L50-L61
245,259
ronaldguillen/wave
wave/request.py
is_form_media_type
def is_form_media_type(media_type): """ Return True if the media type is a valid form media type. """ base_media_type, params = parse_header(media_type.encode(HTTP_HEADER_ENCODING)) return (base_media_type == 'application/x-www-form-urlencoded' or base_media_type == 'multipart/form-data')
python
def is_form_media_type(media_type): """ Return True if the media type is a valid form media type. """ base_media_type, params = parse_header(media_type.encode(HTTP_HEADER_ENCODING)) return (base_media_type == 'application/x-www-form-urlencoded' or base_media_type == 'multipart/form-data')
[ "def", "is_form_media_type", "(", "media_type", ")", ":", "base_media_type", ",", "params", "=", "parse_header", "(", "media_type", ".", "encode", "(", "HTTP_HEADER_ENCODING", ")", ")", "return", "(", "base_media_type", "==", "'application/x-www-form-urlencoded'", "or", "base_media_type", "==", "'multipart/form-data'", ")" ]
Return True if the media type is a valid form media type.
[ "Return", "True", "if", "the", "media", "type", "is", "a", "valid", "form", "media", "type", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L25-L31
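A quick check of `is_form_media_type` against typical Content-Type values; the import path is assumed, and the expected results simply follow the docstring above.

```python
# Illustration only; import path assumed, expected results follow the docstring.
from wave.request import is_form_media_type

print(is_form_media_type('application/x-www-form-urlencoded'))      # expected: True
print(is_form_media_type('multipart/form-data; boundary=abc123'))   # expected: True (parameters ignored)
print(is_form_media_type('application/json'))                       # expected: False
```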
245,260
ronaldguillen/wave
wave/request.py
clone_request
def clone_request(request, method): """ Internal helper method to clone a request, replacing with a different HTTP method. Used for checking permissions against other methods. """ ret = Request(request=request._request, parsers=request.parsers, authenticators=request.authenticators, negotiator=request.negotiator, parser_context=request.parser_context) ret._data = request._data ret._files = request._files ret._full_data = request._full_data ret._content_type = request._content_type ret._stream = request._stream ret.method = method if hasattr(request, '_user'): ret._user = request._user if hasattr(request, '_auth'): ret._auth = request._auth if hasattr(request, '_authenticator'): ret._authenticator = request._authenticator if hasattr(request, 'accepted_renderer'): ret.accepted_renderer = request.accepted_renderer if hasattr(request, 'accepted_media_type'): ret.accepted_media_type = request.accepted_media_type if hasattr(request, 'version'): ret.version = request.version if hasattr(request, 'versioning_scheme'): ret.versioning_scheme = request.versioning_scheme return ret
python
def clone_request(request, method): """ Internal helper method to clone a request, replacing with a different HTTP method. Used for checking permissions against other methods. """ ret = Request(request=request._request, parsers=request.parsers, authenticators=request.authenticators, negotiator=request.negotiator, parser_context=request.parser_context) ret._data = request._data ret._files = request._files ret._full_data = request._full_data ret._content_type = request._content_type ret._stream = request._stream ret.method = method if hasattr(request, '_user'): ret._user = request._user if hasattr(request, '_auth'): ret._auth = request._auth if hasattr(request, '_authenticator'): ret._authenticator = request._authenticator if hasattr(request, 'accepted_renderer'): ret.accepted_renderer = request.accepted_renderer if hasattr(request, 'accepted_media_type'): ret.accepted_media_type = request.accepted_media_type if hasattr(request, 'version'): ret.version = request.version if hasattr(request, 'versioning_scheme'): ret.versioning_scheme = request.versioning_scheme return ret
[ "def", "clone_request", "(", "request", ",", "method", ")", ":", "ret", "=", "Request", "(", "request", "=", "request", ".", "_request", ",", "parsers", "=", "request", ".", "parsers", ",", "authenticators", "=", "request", ".", "authenticators", ",", "negotiator", "=", "request", ".", "negotiator", ",", "parser_context", "=", "request", ".", "parser_context", ")", "ret", ".", "_data", "=", "request", ".", "_data", "ret", ".", "_files", "=", "request", ".", "_files", "ret", ".", "_full_data", "=", "request", ".", "_full_data", "ret", ".", "_content_type", "=", "request", ".", "_content_type", "ret", ".", "_stream", "=", "request", ".", "_stream", "ret", ".", "method", "=", "method", "if", "hasattr", "(", "request", ",", "'_user'", ")", ":", "ret", ".", "_user", "=", "request", ".", "_user", "if", "hasattr", "(", "request", ",", "'_auth'", ")", ":", "ret", ".", "_auth", "=", "request", ".", "_auth", "if", "hasattr", "(", "request", ",", "'_authenticator'", ")", ":", "ret", ".", "_authenticator", "=", "request", ".", "_authenticator", "if", "hasattr", "(", "request", ",", "'accepted_renderer'", ")", ":", "ret", ".", "accepted_renderer", "=", "request", ".", "accepted_renderer", "if", "hasattr", "(", "request", ",", "'accepted_media_type'", ")", ":", "ret", ".", "accepted_media_type", "=", "request", ".", "accepted_media_type", "if", "hasattr", "(", "request", ",", "'version'", ")", ":", "ret", ".", "version", "=", "request", ".", "version", "if", "hasattr", "(", "request", ",", "'versioning_scheme'", ")", ":", "ret", ".", "versioning_scheme", "=", "request", ".", "versioning_scheme", "return", "ret" ]
Internal helper method to clone a request, replacing with a different HTTP method. Used for checking permissions against other methods.
[ "Internal", "helper", "method", "to", "clone", "a", "request", "replacing", "with", "a", "different", "HTTP", "method", ".", "Used", "for", "checking", "permissions", "against", "other", "methods", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L74-L104
245,261
ronaldguillen/wave
wave/request.py
Request.user
def user(self, value): """ Sets the user on the current request. This is necessary to maintain compatibility with django.contrib.auth where the user property is set in the login and logout functions. Note that we also set the user on Django's underlying `HttpRequest` instance, ensuring that it is available to any middleware in the stack. """ self._user = value self._request.user = value
python
def user(self, value): """ Sets the user on the current request. This is necessary to maintain compatibility with django.contrib.auth where the user property is set in the login and logout functions. Note that we also set the user on Django's underlying `HttpRequest` instance, ensuring that it is available to any middleware in the stack. """ self._user = value self._request.user = value
[ "def", "user", "(", "self", ",", "value", ")", ":", "self", ".", "_user", "=", "value", "self", ".", "_request", ".", "user", "=", "value" ]
Sets the user on the current request. This is necessary to maintain compatibility with django.contrib.auth where the user property is set in the login and logout functions. Note that we also set the user on Django's underlying `HttpRequest` instance, ensuring that it is available to any middleware in the stack.
[ "Sets", "the", "user", "on", "the", "current", "request", ".", "This", "is", "necessary", "to", "maintain", "compatibility", "with", "django", ".", "contrib", ".", "auth", "where", "the", "user", "property", "is", "set", "in", "the", "login", "and", "logout", "functions", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L197-L207
245,262
ronaldguillen/wave
wave/request.py
Request.auth
def auth(self, value): """ Sets any non-user authentication information associated with the request, such as an authentication token. """ self._auth = value self._request.auth = value
python
def auth(self, value): """ Sets any non-user authentication information associated with the request, such as an authentication token. """ self._auth = value self._request.auth = value
[ "def", "auth", "(", "self", ",", "value", ")", ":", "self", ".", "_auth", "=", "value", "self", ".", "_request", ".", "auth", "=", "value" ]
Sets any non-user authentication information associated with the request, such as an authentication token.
[ "Sets", "any", "non", "-", "user", "authentication", "information", "associated", "with", "the", "request", "such", "as", "an", "authentication", "token", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L220-L226
245,263
ronaldguillen/wave
wave/request.py
Request._load_data_and_files
def _load_data_and_files(self): """ Parses the request content into `self.data`. """ if not _hasattr(self, '_data'): self._data, self._files = self._parse() if self._files: self._full_data = self._data.copy() self._full_data.update(self._files) else: self._full_data = self._data
python
def _load_data_and_files(self): """ Parses the request content into `self.data`. """ if not _hasattr(self, '_data'): self._data, self._files = self._parse() if self._files: self._full_data = self._data.copy() self._full_data.update(self._files) else: self._full_data = self._data
[ "def", "_load_data_and_files", "(", "self", ")", ":", "if", "not", "_hasattr", "(", "self", ",", "'_data'", ")", ":", "self", ".", "_data", ",", "self", ".", "_files", "=", "self", ".", "_parse", "(", ")", "if", "self", ".", "_files", ":", "self", ".", "_full_data", "=", "self", ".", "_data", ".", "copy", "(", ")", "self", ".", "_full_data", ".", "update", "(", "self", ".", "_files", ")", "else", ":", "self", ".", "_full_data", "=", "self", ".", "_data" ]
Parses the request content into `self.data`.
[ "Parses", "the", "request", "content", "into", "self", ".", "data", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L238-L248
245,264
ronaldguillen/wave
wave/request.py
Request._load_stream
def _load_stream(self): """ Return the content body of the request, as a stream. """ meta = self._request.META try: content_length = int( meta.get('CONTENT_LENGTH', meta.get('HTTP_CONTENT_LENGTH', 0)) ) except (ValueError, TypeError): content_length = 0 if content_length == 0: self._stream = None elif hasattr(self._request, 'read'): self._stream = self._request else: self._stream = six.BytesIO(self.raw_post_data)
python
def _load_stream(self): """ Return the content body of the request, as a stream. """ meta = self._request.META try: content_length = int( meta.get('CONTENT_LENGTH', meta.get('HTTP_CONTENT_LENGTH', 0)) ) except (ValueError, TypeError): content_length = 0 if content_length == 0: self._stream = None elif hasattr(self._request, 'read'): self._stream = self._request else: self._stream = six.BytesIO(self.raw_post_data)
[ "def", "_load_stream", "(", "self", ")", ":", "meta", "=", "self", ".", "_request", ".", "META", "try", ":", "content_length", "=", "int", "(", "meta", ".", "get", "(", "'CONTENT_LENGTH'", ",", "meta", ".", "get", "(", "'HTTP_CONTENT_LENGTH'", ",", "0", ")", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "content_length", "=", "0", "if", "content_length", "==", "0", ":", "self", ".", "_stream", "=", "None", "elif", "hasattr", "(", "self", ".", "_request", ",", "'read'", ")", ":", "self", ".", "_stream", "=", "self", ".", "_request", "else", ":", "self", ".", "_stream", "=", "six", ".", "BytesIO", "(", "self", ".", "raw_post_data", ")" ]
Return the content body of the request, as a stream.
[ "Return", "the", "content", "body", "of", "the", "request", "as", "a", "stream", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/request.py#L250-L267
245,265
LesPatamechanix/patalib
src/patalib/antonym.py
Antonym.generate_antonym
def generate_antonym(self, input_word): """ Generate an antonym using a Synset and its lemmas. """ results = [] synset = wordnet.synsets(input_word) for i in synset: if i.pos in ['n','v']: for j in i.lemmas: if j.antonyms(): name = j.antonyms()[0].name results.append(PataLib().strip_underscore(name)) results = {'input' : input_word, 'results' : results, 'category' : 'antonym'} return results
python
def generate_antonym(self, input_word): """ Generate an antonym using a Synset and its lemmas. """ results = [] synset = wordnet.synsets(input_word) for i in synset: if i.pos in ['n','v']: for j in i.lemmas: if j.antonyms(): name = j.antonyms()[0].name results.append(PataLib().strip_underscore(name)) results = {'input' : input_word, 'results' : results, 'category' : 'antonym'} return results
[ "def", "generate_antonym", "(", "self", ",", "input_word", ")", ":", "results", "=", "[", "]", "synset", "=", "wordnet", ".", "synsets", "(", "input_word", ")", "for", "i", "in", "synset", ":", "if", "i", ".", "pos", "in", "[", "'n'", ",", "'v'", "]", ":", "for", "j", "in", "i", ".", "lemmas", ":", "if", "j", ".", "antonyms", "(", ")", ":", "name", "=", "j", ".", "antonyms", "(", ")", "[", "0", "]", ".", "name", "results", ".", "append", "(", "PataLib", "(", ")", ".", "strip_underscore", "(", "name", ")", ")", "results", "=", "{", "'input'", ":", "input_word", ",", "'results'", ":", "results", ",", "'category'", ":", "'antonym'", "}", "return", "results" ]
Generate an antonym using a Synset and its lemmas.
[ "Generate", "an", "antonym", "using", "a", "Synset", "and", "its", "lemmas", "." ]
d88cca409b1750fdeb88cece048b308f2a710955
https://github.com/LesPatamechanix/patalib/blob/d88cca409b1750fdeb88cece048b308f2a710955/src/patalib/antonym.py#L9-L22
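A hedged call to `generate_antonym`; the import path and a no-argument `Antonym()` constructor are assumptions, the NLTK WordNet corpus must be installed, and the method above targets an older NLTK API (attribute-style `pos`/`lemmas`), so it may not run unchanged on current NLTK releases.

```python
# Hypothetical usage; import path and constructor are assumptions, and the
# snippet inherits the older NLTK API that the method above relies on.
from patalib.antonym import Antonym

result = Antonym().generate_antonym('increase')
print(result['category'])   # 'antonym'
print(result['results'])    # antonyms gathered from the matching WordNet lemmas
```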
245,266
ikalnytskyi/dooku
dooku/ext.py
ExtensionManager.get
def get(self, name, default=None): """ Returns an extension instance with a given name. In case there are few extensions with a given name, the first one will be returned. If no extensions with a given name are exist, the `default` value will be returned. :param name: (str) an extension name :param default: (object) a fallback value :returns: (object) an extension instance """ try: value = self[name] except KeyError: value = default return value
python
def get(self, name, default=None): """ Returns an extension instance with a given name. In case there are few extensions with a given name, the first one will be returned. If no extensions with a given name are exist, the `default` value will be returned. :param name: (str) an extension name :param default: (object) a fallback value :returns: (object) an extension instance """ try: value = self[name] except KeyError: value = default return value
[ "def", "get", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "try", ":", "value", "=", "self", "[", "name", "]", "except", "KeyError", ":", "value", "=", "default", "return", "value" ]
Returns an extension instance with a given name. In case there are few extensions with a given name, the first one will be returned. If no extensions with a given name are exist, the `default` value will be returned. :param name: (str) an extension name :param default: (object) a fallback value :returns: (object) an extension instance
[ "Returns", "an", "extension", "instance", "with", "a", "given", "name", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/ext.py#L98-L114
245,267
radjkarl/fancyTools
fancytools/math/line.py
sort
def sort(line): """ change point position if x1,y0 < x0,y0 """ x0, y0, x1, y1 = line # if (x0**2+y0**2)**0.5 < (x1**2+y1**2)**0.5: # return (x1,y1,x0,y0) # return line # # if x1 < x0: # return (x1,y1,x0,y0) # return line turn = False if abs(x1 - x0) > abs(y1 - y0): if x1 < x0: turn = True elif y1 < y0: turn = True if turn: return (x1, y1, x0, y0) # return line[(2,3,0,1)] return line
python
def sort(line): """ change point position if x1,y0 < x0,y0 """ x0, y0, x1, y1 = line # if (x0**2+y0**2)**0.5 < (x1**2+y1**2)**0.5: # return (x1,y1,x0,y0) # return line # # if x1 < x0: # return (x1,y1,x0,y0) # return line turn = False if abs(x1 - x0) > abs(y1 - y0): if x1 < x0: turn = True elif y1 < y0: turn = True if turn: return (x1, y1, x0, y0) # return line[(2,3,0,1)] return line
[ "def", "sort", "(", "line", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "line", "# if (x0**2+y0**2)**0.5 < (x1**2+y1**2)**0.5:", "# return (x1,y1,x0,y0)", "# return line", "#", "# if x1 < x0:", "# return (x1,y1,x0,y0)", "# return line", "turn", "=", "False", "if", "abs", "(", "x1", "-", "x0", ")", ">", "abs", "(", "y1", "-", "y0", ")", ":", "if", "x1", "<", "x0", ":", "turn", "=", "True", "elif", "y1", "<", "y0", ":", "turn", "=", "True", "if", "turn", ":", "return", "(", "x1", ",", "y1", ",", "x0", ",", "y0", ")", "# return line[(2,3,0,1)]", "return", "line" ]
change point position if x1,y0 < x0,y0
[ "change", "point", "position", "if", "x1", "y0", "<", "x0", "y0" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L49-L73
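A short usage sketch for the sort() row above; the expected tuples follow directly from the branches in the code (assuming the package is installed and importable under the path shown):

from fancytools.math.line import sort

sort((5, 0, 1, 2))   # mostly horizontal and x1 < x0, so endpoints swap: (1, 2, 5, 0)
sort((0, 0, 4, 1))   # already ordered, returned unchanged: (0, 0, 4, 1)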
245,268
radjkarl/fancyTools
fancytools/math/line.py
dxdy
def dxdy(line): """ return normalised ascent vector """ x0, y0, x1, y1 = line dx = float(x1 - x0) dy = float(y1 - y0) f = hypot(dx, dy) return dx / f, dy / f
python
def dxdy(line): """ return normalised ascent vector """ x0, y0, x1, y1 = line dx = float(x1 - x0) dy = float(y1 - y0) f = hypot(dx, dy) return dx / f, dy / f
[ "def", "dxdy", "(", "line", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "line", "dx", "=", "float", "(", "x1", "-", "x0", ")", "dy", "=", "float", "(", "y1", "-", "y0", ")", "f", "=", "hypot", "(", "dx", ",", "dy", ")", "return", "dx", "/", "f", ",", "dy", "/", "f" ]
return normalised ascent vector
[ "return", "normalised", "ascent", "vector" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L104-L112
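dxdy() returns the unit direction vector of the segment, so a 3-4-5 triangle makes an easy check (same import assumption as above):

from fancytools.math.line import dxdy

dxdy((0, 0, 3, 4))   # (0.6, 0.8), since hypot(3, 4) == 5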
245,269
radjkarl/fancyTools
fancytools/math/line.py
fromAttr
def fromAttr(mid, ang, dist): """ create from middle, angle and distance """ mx, my = mid dx = cos(ang) * dist * 0.5 dy = sin(ang) * dist * 0.5 return mx - dx, my - dy, mx + dx, my + dy
python
def fromAttr(mid, ang, dist): """ create from middle, angle and distance """ mx, my = mid dx = cos(ang) * dist * 0.5 dy = sin(ang) * dist * 0.5 return mx - dx, my - dy, mx + dx, my + dy
[ "def", "fromAttr", "(", "mid", ",", "ang", ",", "dist", ")", ":", "mx", ",", "my", "=", "mid", "dx", "=", "cos", "(", "ang", ")", "*", "dist", "*", "0.5", "dy", "=", "sin", "(", "ang", ")", "*", "dist", "*", "0.5", "return", "mx", "-", "dx", ",", "my", "-", "dy", ",", "mx", "+", "dx", ",", "my", "+", "dy" ]
create from middle, angle and distance
[ "create", "from", "middle", "angle", "and", "distance" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L151-L158
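fromAttr() rebuilds a segment from its midpoint, angle and length; for angle 0 the result is a horizontal segment centred on the given midpoint:

from fancytools.math.line import fromAttr

fromAttr((0, 0), 0.0, 4.0)   # (-2.0, 0.0, 2.0, 0.0)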
245,270
radjkarl/fancyTools
fancytools/math/line.py
fromAttr2
def fromAttr2(start, ang, dist): """ create from start, angle and distance """ sx, sy = start dx = cos(ang) * dist dy = sin(ang) * dist return sx, sy, sx + dx, sy + dy
python
def fromAttr2(start, ang, dist): """ create from start, angle and distance """ sx, sy = start dx = cos(ang) * dist dy = sin(ang) * dist return sx, sy, sx + dx, sy + dy
[ "def", "fromAttr2", "(", "start", ",", "ang", ",", "dist", ")", ":", "sx", ",", "sy", "=", "start", "dx", "=", "cos", "(", "ang", ")", "*", "dist", "dy", "=", "sin", "(", "ang", ")", "*", "dist", "return", "sx", ",", "sy", ",", "sx", "+", "dx", ",", "sy", "+", "dy" ]
create from start, angle and distance
[ "create", "from", "start", "angle", "and", "distance" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L161-L168
245,271
radjkarl/fancyTools
fancytools/math/line.py
merge
def merge(l1, l2): """ merge 2 lines together """ x1, y1, x2, y2 = l1 xx1, yy1, xx2, yy2 = l2 comb = ((x1, y1, xx1, yy1), (x1, y1, xx2, yy2), (x2, y2, xx1, yy1), (x2, y2, xx2, yy2)) d = [length(c) for c in comb] i = argmax(d) dist = d[i] mid = middle(comb[i]) a = (angle(l1) + angle(l2)) * 0.5 return fromAttr(mid, a, dist)
python
def merge(l1, l2): """ merge 2 lines together """ x1, y1, x2, y2 = l1 xx1, yy1, xx2, yy2 = l2 comb = ((x1, y1, xx1, yy1), (x1, y1, xx2, yy2), (x2, y2, xx1, yy1), (x2, y2, xx2, yy2)) d = [length(c) for c in comb] i = argmax(d) dist = d[i] mid = middle(comb[i]) a = (angle(l1) + angle(l2)) * 0.5 return fromAttr(mid, a, dist)
[ "def", "merge", "(", "l1", ",", "l2", ")", ":", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "l1", "xx1", ",", "yy1", ",", "xx2", ",", "yy2", "=", "l2", "comb", "=", "(", "(", "x1", ",", "y1", ",", "xx1", ",", "yy1", ")", ",", "(", "x1", ",", "y1", ",", "xx2", ",", "yy2", ")", ",", "(", "x2", ",", "y2", ",", "xx1", ",", "yy1", ")", ",", "(", "x2", ",", "y2", ",", "xx2", ",", "yy2", ")", ")", "d", "=", "[", "length", "(", "c", ")", "for", "c", "in", "comb", "]", "i", "=", "argmax", "(", "d", ")", "dist", "=", "d", "[", "i", "]", "mid", "=", "middle", "(", "comb", "[", "i", "]", ")", "a", "=", "(", "angle", "(", "l1", ")", "+", "angle", "(", "l2", ")", ")", "*", "0.5", "return", "fromAttr", "(", "mid", ",", "a", ",", "dist", ")" ]
merge 2 lines together
[ "merge", "2", "lines", "together" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L190-L209
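A hedged example for merge(): it leans on the module's length(), middle(), angle() and argmax() helpers, which are not shown in these rows, so the expected output is stated under the assumption that angle() reports 0 for horizontal segments:

from fancytools.math.line import merge

merge((0, 0, 4, 0), (5, 0, 9, 0))   # roughly (0.0, 0.0, 9.0, 0.0): the outer hull of the two segments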
245,272
radjkarl/fancyTools
fancytools/math/line.py
distance
def distance(line, point): """ infinite line to point or line to line distance is point is given as line - use middle point of that liune """ x0, y0, x1, y1 = line try: p1, p2 = point except ValueError: # line is given instead of point p1, p2 = middle(point) n1 = ascent(line) n2 = -1 n0 = y0 - n1 * x0 return abs(n1 * p1 + n2 * p2 + n0) / (n1 ** 2 + n2 ** 2) ** 0.5
python
def distance(line, point): """ infinite line to point or line to line distance is point is given as line - use middle point of that liune """ x0, y0, x1, y1 = line try: p1, p2 = point except ValueError: # line is given instead of point p1, p2 = middle(point) n1 = ascent(line) n2 = -1 n0 = y0 - n1 * x0 return abs(n1 * p1 + n2 * p2 + n0) / (n1 ** 2 + n2 ** 2) ** 0.5
[ "def", "distance", "(", "line", ",", "point", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "line", "try", ":", "p1", ",", "p2", "=", "point", "except", "ValueError", ":", "# line is given instead of point", "p1", ",", "p2", "=", "middle", "(", "point", ")", "n1", "=", "ascent", "(", "line", ")", "n2", "=", "-", "1", "n0", "=", "y0", "-", "n1", "*", "x0", "return", "abs", "(", "n1", "*", "p1", "+", "n2", "*", "p2", "+", "n0", ")", "/", "(", "n1", "**", "2", "+", "n2", "**", "2", ")", "**", "0.5" ]
infinite line to point or line to line distance is point is given as line - use middle point of that liune
[ "infinite", "line", "to", "point", "or", "line", "to", "line", "distance", "is", "point", "is", "given", "as", "line", "-", "use", "middle", "point", "of", "that", "liune" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L236-L250
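distance() measures from the infinite line, not the segment, and relies on the module's ascent() helper (the slope, not shown in these rows); for a horizontal line the result is simply the vertical offset:

from fancytools.math.line import distance

distance((0, 0, 10, 0), (3, 5))   # 5.0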
245,273
radjkarl/fancyTools
fancytools/math/line.py
intersection
def intersection(line1, line2): """ Return the coordinates of a point of intersection given two lines. Return None if the lines are parallel, but non-colli_near. Return an arbitrary point of intersection if the lines are colli_near. Parameters: line1 and line2: lines given by 4 points (x0,y0,x1,y1). """ x1, y1, x2, y2 = line1 u1, v1, u2, v2 = line2 (a, b), (c, d) = (x2 - x1, u1 - u2), (y2 - y1, v1 - v2) e, f = u1 - x1, v1 - y1 # Solve ((a,b), (c,d)) * (t,s) = (e,f) denom = float(a * d - b * c) if _near(denom, 0): # parallel # If colli_near, the equation is solvable with t = 0. # When t=0, s would have to equal e/b and f/d if b == 0 or d == 0: return None if _near(e / b, f / d): # colli_near px = x1 py = y1 else: return None else: t = (e * d - b * f) / denom # s = (a*f - e*c)/denom px = x1 + t * (x2 - x1) py = y1 + t * (y2 - y1) return px, py
python
def intersection(line1, line2): """ Return the coordinates of a point of intersection given two lines. Return None if the lines are parallel, but non-colli_near. Return an arbitrary point of intersection if the lines are colli_near. Parameters: line1 and line2: lines given by 4 points (x0,y0,x1,y1). """ x1, y1, x2, y2 = line1 u1, v1, u2, v2 = line2 (a, b), (c, d) = (x2 - x1, u1 - u2), (y2 - y1, v1 - v2) e, f = u1 - x1, v1 - y1 # Solve ((a,b), (c,d)) * (t,s) = (e,f) denom = float(a * d - b * c) if _near(denom, 0): # parallel # If colli_near, the equation is solvable with t = 0. # When t=0, s would have to equal e/b and f/d if b == 0 or d == 0: return None if _near(e / b, f / d): # colli_near px = x1 py = y1 else: return None else: t = (e * d - b * f) / denom # s = (a*f - e*c)/denom px = x1 + t * (x2 - x1) py = y1 + t * (y2 - y1) return px, py
[ "def", "intersection", "(", "line1", ",", "line2", ")", ":", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "line1", "u1", ",", "v1", ",", "u2", ",", "v2", "=", "line2", "(", "a", ",", "b", ")", ",", "(", "c", ",", "d", ")", "=", "(", "x2", "-", "x1", ",", "u1", "-", "u2", ")", ",", "(", "y2", "-", "y1", ",", "v1", "-", "v2", ")", "e", ",", "f", "=", "u1", "-", "x1", ",", "v1", "-", "y1", "# Solve ((a,b), (c,d)) * (t,s) = (e,f)", "denom", "=", "float", "(", "a", "*", "d", "-", "b", "*", "c", ")", "if", "_near", "(", "denom", ",", "0", ")", ":", "# parallel", "# If colli_near, the equation is solvable with t = 0.", "# When t=0, s would have to equal e/b and f/d", "if", "b", "==", "0", "or", "d", "==", "0", ":", "return", "None", "if", "_near", "(", "e", "/", "b", ",", "f", "/", "d", ")", ":", "# colli_near", "px", "=", "x1", "py", "=", "y1", "else", ":", "return", "None", "else", ":", "t", "=", "(", "e", "*", "d", "-", "b", "*", "f", ")", "/", "denom", "# s = (a*f - e*c)/denom", "px", "=", "x1", "+", "t", "*", "(", "x2", "-", "x1", ")", "py", "=", "y1", "+", "t", "*", "(", "y2", "-", "y1", ")", "return", "px", ",", "py" ]
Return the coordinates of a point of intersection given two lines. Return None if the lines are parallel, but non-colli_near. Return an arbitrary point of intersection if the lines are colli_near. Parameters: line1 and line2: lines given by 4 points (x0,y0,x1,y1).
[ "Return", "the", "coordinates", "of", "a", "point", "of", "intersection", "given", "two", "lines", ".", "Return", "None", "if", "the", "lines", "are", "parallel", "but", "non", "-", "colli_near", ".", "Return", "an", "arbitrary", "point", "of", "intersection", "if", "the", "lines", "are", "colli_near", "." ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L311-L343
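Worked examples for intersection(); note that the lines are treated as infinite because the parameter t is never clamped to [0, 1]:

from fancytools.math.line import intersection

intersection((0, 0, 4, 4), (0, 4, 4, 0))   # (2.0, 2.0)
intersection((0, 0, 1, 1), (0, 1, 1, 2))   # None, parallel but not collinear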
245,274
radjkarl/fancyTools
fancytools/math/line.py
translate
def translate(line, ascent, offs=0): """ offs -> shifts parallel to line ascent -> rotate line """ # TODO: why do I have thuis factor here? ascent *= -2 offs *= -2 l0 = length(line) # change relative to line: t0 = offs # -h+offs t1 = l0 * ascent + offs return translate2P(line, t0, t1)
python
def translate(line, ascent, offs=0): """ offs -> shifts parallel to line ascent -> rotate line """ # TODO: why do I have thuis factor here? ascent *= -2 offs *= -2 l0 = length(line) # change relative to line: t0 = offs # -h+offs t1 = l0 * ascent + offs return translate2P(line, t0, t1)
[ "def", "translate", "(", "line", ",", "ascent", ",", "offs", "=", "0", ")", ":", "# TODO: why do I have thuis factor here?", "ascent", "*=", "-", "2", "offs", "*=", "-", "2", "l0", "=", "length", "(", "line", ")", "# change relative to line:", "t0", "=", "offs", "# -h+offs", "t1", "=", "l0", "*", "ascent", "+", "offs", "return", "translate2P", "(", "line", ",", "t0", ",", "t1", ")" ]
offs -> shifts parallel to line ascent -> rotate line
[ "offs", "-", ">", "shifts", "parallel", "to", "line", "ascent", "-", ">", "rotate", "line" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L351-L365
245,275
radjkarl/fancyTools
fancytools/math/line.py
splitN
def splitN(line, n): """ split a line n times returns n sublines """ x0, y0, x1, y1 = line out = empty((n, 4), dtype=type(line[0])) px, py = x0, y0 dx = (x1 - x0) / n dy = (y1 - y0) / n for i in range(n): o = out[i] o[0] = px o[1] = py px += dx py += dy o[2] = px o[3] = py return out
python
def splitN(line, n): """ split a line n times returns n sublines """ x0, y0, x1, y1 = line out = empty((n, 4), dtype=type(line[0])) px, py = x0, y0 dx = (x1 - x0) / n dy = (y1 - y0) / n for i in range(n): o = out[i] o[0] = px o[1] = py px += dx py += dy o[2] = px o[3] = py return out
[ "def", "splitN", "(", "line", ",", "n", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "line", "out", "=", "empty", "(", "(", "n", ",", "4", ")", ",", "dtype", "=", "type", "(", "line", "[", "0", "]", ")", ")", "px", ",", "py", "=", "x0", ",", "y0", "dx", "=", "(", "x1", "-", "x0", ")", "/", "n", "dy", "=", "(", "y1", "-", "y0", ")", "/", "n", "for", "i", "in", "range", "(", "n", ")", ":", "o", "=", "out", "[", "i", "]", "o", "[", "0", "]", "=", "px", "o", "[", "1", "]", "=", "py", "px", "+=", "dx", "py", "+=", "dy", "o", "[", "2", "]", "=", "px", "o", "[", "3", "]", "=", "py", "return", "out" ]
split a line n times returns n sublines
[ "split", "a", "line", "n", "times", "returns", "n", "sublines" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L402-L422
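splitN() fills a pre-allocated array whose empty() constructor presumably comes from NumPy (the import is not shown in these rows); splitting a horizontal segment into three equal parts:

from fancytools.math.line import splitN

splitN((0, 0, 6, 0), 3)
# array([[0, 0, 2, 0],
#        [2, 0, 4, 0],
#        [4, 0, 6, 0]])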
245,276
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_remove_files
def _remove_files(files): """ Remove all given files. Args: files (list): List of filenames, which will be removed. """ logger.debug("Request for file removal (_remove_files()).") for fn in files: if os.path.exists(fn): logger.debug("Removing '%s'." % fn) os.remove(fn)
python
def _remove_files(files): """ Remove all given files. Args: files (list): List of filenames, which will be removed. """ logger.debug("Request for file removal (_remove_files()).") for fn in files: if os.path.exists(fn): logger.debug("Removing '%s'." % fn) os.remove(fn)
[ "def", "_remove_files", "(", "files", ")", ":", "logger", ".", "debug", "(", "\"Request for file removal (_remove_files()).\"", ")", "for", "fn", "in", "files", ":", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "logger", ".", "debug", "(", "\"Removing '%s'.\"", "%", "fn", ")", "os", ".", "remove", "(", "fn", ")" ]
Remove all given files. Args: files (list): List of filenames, which will be removed.
[ "Remove", "all", "given", "files", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L99-L111
245,277
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_safe_read_meta_file
def _safe_read_meta_file(fn, error_protocol): """ Try to read MetadataFile. If the exception is raised, log the errors to the `error_protocol` and return None. """ try: return MetadataFile(fn) except Exception, e: error_protocol.append( "Can't read MetadataFile '%s':\n\t%s\n" % (fn, e.message) )
python
def _safe_read_meta_file(fn, error_protocol): """ Try to read MetadataFile. If the exception is raised, log the errors to the `error_protocol` and return None. """ try: return MetadataFile(fn) except Exception, e: error_protocol.append( "Can't read MetadataFile '%s':\n\t%s\n" % (fn, e.message) )
[ "def", "_safe_read_meta_file", "(", "fn", ",", "error_protocol", ")", ":", "try", ":", "return", "MetadataFile", "(", "fn", ")", "except", "Exception", ",", "e", ":", "error_protocol", ".", "append", "(", "\"Can't read MetadataFile '%s':\\n\\t%s\\n\"", "%", "(", "fn", ",", "e", ".", "message", ")", ")" ]
Try to read MetadataFile. If the exception is raised, log the errors to the `error_protocol` and return None.
[ "Try", "to", "read", "MetadataFile", ".", "If", "the", "exception", "is", "raised", "log", "the", "errors", "to", "the", "error_protocol", "and", "return", "None", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L114-L124
245,278
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_process_pair
def _process_pair(first_fn, second_fn, error_protocol): """ Look at given filenames, decide which is what and try to pair them. """ ebook = None metadata = None if _is_meta(first_fn) and not _is_meta(second_fn): # 1st meta, 2nd data logger.debug( "Parsed: '%s' as meta, '%s' as data." % (first_fn, second_fn) ) metadata, ebook = first_fn, second_fn elif not _is_meta(first_fn) and _is_meta(second_fn): # 1st data, 2nd meta logger.debug( "Parsed: '%s' as meta, '%s' as data." % (second_fn, first_fn) ) metadata, ebook = second_fn, first_fn elif _is_meta(first_fn) and _is_meta(second_fn): # both metadata logger.debug( "Parsed: both '%s' and '%s' as meta." % (first_fn, second_fn) ) return [ _safe_read_meta_file(first_fn, error_protocol), _safe_read_meta_file(second_fn, error_protocol) ] else: # both data logger.debug( "Parsed: both '%s' and '%s' as data." % (first_fn, second_fn) ) return [ EbookFile(first_fn), EbookFile(second_fn) ] # process pairs, which were created in first two branches of the if # statement above pair = DataPair( metadata_file=_safe_read_meta_file(metadata, error_protocol), ebook_file=EbookFile(ebook) ) if not pair.metadata_file: logger.error( "Can't parse MetadataFile '%s'. Continuing with data file '%s'." % ( metadata, ebook ) ) return [pair.ebook_file] return [pair]
python
def _process_pair(first_fn, second_fn, error_protocol): """ Look at given filenames, decide which is what and try to pair them. """ ebook = None metadata = None if _is_meta(first_fn) and not _is_meta(second_fn): # 1st meta, 2nd data logger.debug( "Parsed: '%s' as meta, '%s' as data." % (first_fn, second_fn) ) metadata, ebook = first_fn, second_fn elif not _is_meta(first_fn) and _is_meta(second_fn): # 1st data, 2nd meta logger.debug( "Parsed: '%s' as meta, '%s' as data." % (second_fn, first_fn) ) metadata, ebook = second_fn, first_fn elif _is_meta(first_fn) and _is_meta(second_fn): # both metadata logger.debug( "Parsed: both '%s' and '%s' as meta." % (first_fn, second_fn) ) return [ _safe_read_meta_file(first_fn, error_protocol), _safe_read_meta_file(second_fn, error_protocol) ] else: # both data logger.debug( "Parsed: both '%s' and '%s' as data." % (first_fn, second_fn) ) return [ EbookFile(first_fn), EbookFile(second_fn) ] # process pairs, which were created in first two branches of the if # statement above pair = DataPair( metadata_file=_safe_read_meta_file(metadata, error_protocol), ebook_file=EbookFile(ebook) ) if not pair.metadata_file: logger.error( "Can't parse MetadataFile '%s'. Continuing with data file '%s'." % ( metadata, ebook ) ) return [pair.ebook_file] return [pair]
[ "def", "_process_pair", "(", "first_fn", ",", "second_fn", ",", "error_protocol", ")", ":", "ebook", "=", "None", "metadata", "=", "None", "if", "_is_meta", "(", "first_fn", ")", "and", "not", "_is_meta", "(", "second_fn", ")", ":", "# 1st meta, 2nd data", "logger", ".", "debug", "(", "\"Parsed: '%s' as meta, '%s' as data.\"", "%", "(", "first_fn", ",", "second_fn", ")", ")", "metadata", ",", "ebook", "=", "first_fn", ",", "second_fn", "elif", "not", "_is_meta", "(", "first_fn", ")", "and", "_is_meta", "(", "second_fn", ")", ":", "# 1st data, 2nd meta", "logger", ".", "debug", "(", "\"Parsed: '%s' as meta, '%s' as data.\"", "%", "(", "second_fn", ",", "first_fn", ")", ")", "metadata", ",", "ebook", "=", "second_fn", ",", "first_fn", "elif", "_is_meta", "(", "first_fn", ")", "and", "_is_meta", "(", "second_fn", ")", ":", "# both metadata", "logger", ".", "debug", "(", "\"Parsed: both '%s' and '%s' as meta.\"", "%", "(", "first_fn", ",", "second_fn", ")", ")", "return", "[", "_safe_read_meta_file", "(", "first_fn", ",", "error_protocol", ")", ",", "_safe_read_meta_file", "(", "second_fn", ",", "error_protocol", ")", "]", "else", ":", "# both data", "logger", ".", "debug", "(", "\"Parsed: both '%s' and '%s' as data.\"", "%", "(", "first_fn", ",", "second_fn", ")", ")", "return", "[", "EbookFile", "(", "first_fn", ")", ",", "EbookFile", "(", "second_fn", ")", "]", "# process pairs, which were created in first two branches of the if", "# statement above", "pair", "=", "DataPair", "(", "metadata_file", "=", "_safe_read_meta_file", "(", "metadata", ",", "error_protocol", ")", ",", "ebook_file", "=", "EbookFile", "(", "ebook", ")", ")", "if", "not", "pair", ".", "metadata_file", ":", "logger", ".", "error", "(", "\"Can't parse MetadataFile '%s'. Continuing with data file '%s'.\"", "%", "(", "metadata", ",", "ebook", ")", ")", "return", "[", "pair", ".", "ebook_file", "]", "return", "[", "pair", "]" ]
Look at given filenames, decide which is what and try to pair them.
[ "Look", "at", "given", "filenames", "decide", "which", "is", "what", "and", "try", "to", "pair", "them", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L127-L175
245,279
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_process_directory
def _process_directory(files, user_conf, error_protocol): """ Look at items in given directory, try to match them for same names and pair them. If the items can't be paired, add their representation. Note: All successfully processed files are removed. Returns: list: of items. Example: [MetadataFile, DataPair, DataPair, EbookFile] """ items = [] banned = [settings.USER_IMPORT_LOG, settings.USER_ERROR_LOG] files = filter(lambda x: not os.path.basename(x) in banned, files) if len(files) == 2 and conf_merger(user_conf, "SAME_DIR_PAIRING"): logger.debug("There are only two files.") items.extend(_process_pair(files[0], files[1], error_protocol)) files = [] while files: same_names = [] fn = files.pop() logger.debug("Processing '%s'." % fn) # get files with same names (ignore paths and suffixes) if conf_merger(user_conf, "SAME_NAME_DIR_PAIRING"): same_names = _same_named(fn, files) # returns (index, name) indexes = map(lambda (i, fn): i, same_names) # get indexes same_names = map(lambda (i, fn): fn, same_names) # get names # remove `same_names` from `files` (they are processed in this # pass) for i in sorted(indexes, reverse=True): del files[i] # has exactly one file pair SDP = conf_merger(user_conf, "SAME_NAME_DIR_PAIRING") if len(same_names) == 1 and SDP: logger.debug( "'%s' can be probably paired with '%s'." % (fn, same_names[0]) ) items.extend(_process_pair(fn, same_names[0], error_protocol)) elif not same_names: # there is no similar files logger.debug("'%s' can't be paired. Adding standalone file." % fn) if _is_meta(fn): items.append(_safe_read_meta_file(fn, error_protocol)) else: items.append(EbookFile(fn)) else: # error - there is too many similar files logger.error( "Too many files with same name: %s" % ", ".join(same_names) ) error_protocol.append( "Too many files with same name:" + "\n\t".join(same_names) + "\n\n---\n" ) return filter(lambda x: x, items)
python
def _process_directory(files, user_conf, error_protocol): """ Look at items in given directory, try to match them for same names and pair them. If the items can't be paired, add their representation. Note: All successfully processed files are removed. Returns: list: of items. Example: [MetadataFile, DataPair, DataPair, EbookFile] """ items = [] banned = [settings.USER_IMPORT_LOG, settings.USER_ERROR_LOG] files = filter(lambda x: not os.path.basename(x) in banned, files) if len(files) == 2 and conf_merger(user_conf, "SAME_DIR_PAIRING"): logger.debug("There are only two files.") items.extend(_process_pair(files[0], files[1], error_protocol)) files = [] while files: same_names = [] fn = files.pop() logger.debug("Processing '%s'." % fn) # get files with same names (ignore paths and suffixes) if conf_merger(user_conf, "SAME_NAME_DIR_PAIRING"): same_names = _same_named(fn, files) # returns (index, name) indexes = map(lambda (i, fn): i, same_names) # get indexes same_names = map(lambda (i, fn): fn, same_names) # get names # remove `same_names` from `files` (they are processed in this # pass) for i in sorted(indexes, reverse=True): del files[i] # has exactly one file pair SDP = conf_merger(user_conf, "SAME_NAME_DIR_PAIRING") if len(same_names) == 1 and SDP: logger.debug( "'%s' can be probably paired with '%s'." % (fn, same_names[0]) ) items.extend(_process_pair(fn, same_names[0], error_protocol)) elif not same_names: # there is no similar files logger.debug("'%s' can't be paired. Adding standalone file." % fn) if _is_meta(fn): items.append(_safe_read_meta_file(fn, error_protocol)) else: items.append(EbookFile(fn)) else: # error - there is too many similar files logger.error( "Too many files with same name: %s" % ", ".join(same_names) ) error_protocol.append( "Too many files with same name:" + "\n\t".join(same_names) + "\n\n---\n" ) return filter(lambda x: x, items)
[ "def", "_process_directory", "(", "files", ",", "user_conf", ",", "error_protocol", ")", ":", "items", "=", "[", "]", "banned", "=", "[", "settings", ".", "USER_IMPORT_LOG", ",", "settings", ".", "USER_ERROR_LOG", "]", "files", "=", "filter", "(", "lambda", "x", ":", "not", "os", ".", "path", ".", "basename", "(", "x", ")", "in", "banned", ",", "files", ")", "if", "len", "(", "files", ")", "==", "2", "and", "conf_merger", "(", "user_conf", ",", "\"SAME_DIR_PAIRING\"", ")", ":", "logger", ".", "debug", "(", "\"There are only two files.\"", ")", "items", ".", "extend", "(", "_process_pair", "(", "files", "[", "0", "]", ",", "files", "[", "1", "]", ",", "error_protocol", ")", ")", "files", "=", "[", "]", "while", "files", ":", "same_names", "=", "[", "]", "fn", "=", "files", ".", "pop", "(", ")", "logger", ".", "debug", "(", "\"Processing '%s'.\"", "%", "fn", ")", "# get files with same names (ignore paths and suffixes)", "if", "conf_merger", "(", "user_conf", ",", "\"SAME_NAME_DIR_PAIRING\"", ")", ":", "same_names", "=", "_same_named", "(", "fn", ",", "files", ")", "# returns (index, name)", "indexes", "=", "map", "(", "lambda", "(", "i", ",", "fn", ")", ":", "i", ",", "same_names", ")", "# get indexes", "same_names", "=", "map", "(", "lambda", "(", "i", ",", "fn", ")", ":", "fn", ",", "same_names", ")", "# get names", "# remove `same_names` from `files` (they are processed in this", "# pass)", "for", "i", "in", "sorted", "(", "indexes", ",", "reverse", "=", "True", ")", ":", "del", "files", "[", "i", "]", "# has exactly one file pair", "SDP", "=", "conf_merger", "(", "user_conf", ",", "\"SAME_NAME_DIR_PAIRING\"", ")", "if", "len", "(", "same_names", ")", "==", "1", "and", "SDP", ":", "logger", ".", "debug", "(", "\"'%s' can be probably paired with '%s'.\"", "%", "(", "fn", ",", "same_names", "[", "0", "]", ")", ")", "items", ".", "extend", "(", "_process_pair", "(", "fn", ",", "same_names", "[", "0", "]", ",", "error_protocol", ")", ")", "elif", "not", "same_names", ":", "# there is no similar files", "logger", ".", "debug", "(", "\"'%s' can't be paired. Adding standalone file.\"", "%", "fn", ")", "if", "_is_meta", "(", "fn", ")", ":", "items", ".", "append", "(", "_safe_read_meta_file", "(", "fn", ",", "error_protocol", ")", ")", "else", ":", "items", ".", "append", "(", "EbookFile", "(", "fn", ")", ")", "else", ":", "# error - there is too many similar files", "logger", ".", "error", "(", "\"Too many files with same name: %s\"", "%", "\", \"", ".", "join", "(", "same_names", ")", ")", "error_protocol", ".", "append", "(", "\"Too many files with same name:\"", "+", "\"\\n\\t\"", ".", "join", "(", "same_names", ")", "+", "\"\\n\\n---\\n\"", ")", "return", "filter", "(", "lambda", "x", ":", "x", ",", "items", ")" ]
Look at items in given directory, try to match them for same names and pair them. If the items can't be paired, add their representation. Note: All successfully processed files are removed. Returns: list: of items. Example: [MetadataFile, DataPair, DataPair, EbookFile]
[ "Look", "at", "items", "in", "given", "directory", "try", "to", "match", "them", "for", "same", "names", "and", "pair", "them", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L178-L241
245,280
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_index
def _index(array, item, key=None): """ Array search function. Written, because ``.index()`` method for array doesn't have `key` parameter and raises `ValueError`, if the item is not found. Args: array (list): List of items, which will be searched. item (whatever): Item, which will be matched to elements in `array`. key (function, default None): Function, which will be used for lookup into each element in `array`. Return: Index of `item` in `array`, if the `item` is in `array`, else `-1`. """ for i, el in enumerate(array): resolved_el = key(el) if key else el if resolved_el == item: return i return -1
python
def _index(array, item, key=None): """ Array search function. Written, because ``.index()`` method for array doesn't have `key` parameter and raises `ValueError`, if the item is not found. Args: array (list): List of items, which will be searched. item (whatever): Item, which will be matched to elements in `array`. key (function, default None): Function, which will be used for lookup into each element in `array`. Return: Index of `item` in `array`, if the `item` is in `array`, else `-1`. """ for i, el in enumerate(array): resolved_el = key(el) if key else el if resolved_el == item: return i return -1
[ "def", "_index", "(", "array", ",", "item", ",", "key", "=", "None", ")", ":", "for", "i", ",", "el", "in", "enumerate", "(", "array", ")", ":", "resolved_el", "=", "key", "(", "el", ")", "if", "key", "else", "el", "if", "resolved_el", "==", "item", ":", "return", "i", "return", "-", "1" ]
Array search function. Written, because ``.index()`` method for array doesn't have `key` parameter and raises `ValueError`, if the item is not found. Args: array (list): List of items, which will be searched. item (whatever): Item, which will be matched to elements in `array`. key (function, default None): Function, which will be used for lookup into each element in `array`. Return: Index of `item` in `array`, if the `item` is in `array`, else `-1`.
[ "Array", "search", "function", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L244-L266
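_index() is list.index() generalised with a key function and a -1 sentinel instead of ValueError; the same pattern in standalone form (the names below are illustrative, not part of the package):

def index_by(seq, wanted, key=None):
    # return the position of the first element whose key equals `wanted`, else -1
    for i, el in enumerate(seq):
        if (key(el) if key else el) == wanted:
            return i
    return -1

index_by(['0195384865.epub', 'cover.jpg'], 'cover', key=lambda fn: fn.split('.')[0])   # 1
index_by(['a', 'b'], 'c')                                                              # -1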
245,281
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_isbn_pairing
def _isbn_pairing(items): """ Pair `items` with same ISBN into `DataPair` objects. Args: items (list): list of items, which will be searched. Returns: list: list with paired items. Paired items are removed, `DataPair` is \ added instead. """ NameWrapper = namedtuple("NameWrapper", ["name", "obj"]) metas = map( lambda x: NameWrapper(_just_name(x.filename), x), filter(lambda x: isinstance(x, MetadataFile), items) ) ebooks = map( lambda x: NameWrapper(_just_name(x.filename), x), filter(lambda x: isinstance(x, EbookFile), items) ) # simple pairing mechanism, which shouldn't be O^2 complex, but something # slightly better metas = sorted(metas, key=lambda x: x.name) ebooks = sorted(ebooks, key=lambda x: x.name, reverse=True) while metas: meta = metas.pop() if not isbn_validator.is_valid_isbn(meta.name): continue if not ebooks: break ebook_index = _index(ebooks, meta.name, key=lambda x: x.name) if ebook_index >= 0: logger.debug( "Pairing '%s' and '%s'." % ( meta.obj.filename, ebooks[ebook_index].obj.filename ) ) items.append( DataPair( metadata_file=meta.obj, ebook_file=ebooks[ebook_index].obj ) ) items.remove(meta.obj) items.remove(ebooks[ebook_index].obj) ebooks = ebooks[ebook_index+1:] return items
python
def _isbn_pairing(items): """ Pair `items` with same ISBN into `DataPair` objects. Args: items (list): list of items, which will be searched. Returns: list: list with paired items. Paired items are removed, `DataPair` is \ added instead. """ NameWrapper = namedtuple("NameWrapper", ["name", "obj"]) metas = map( lambda x: NameWrapper(_just_name(x.filename), x), filter(lambda x: isinstance(x, MetadataFile), items) ) ebooks = map( lambda x: NameWrapper(_just_name(x.filename), x), filter(lambda x: isinstance(x, EbookFile), items) ) # simple pairing mechanism, which shouldn't be O^2 complex, but something # slightly better metas = sorted(metas, key=lambda x: x.name) ebooks = sorted(ebooks, key=lambda x: x.name, reverse=True) while metas: meta = metas.pop() if not isbn_validator.is_valid_isbn(meta.name): continue if not ebooks: break ebook_index = _index(ebooks, meta.name, key=lambda x: x.name) if ebook_index >= 0: logger.debug( "Pairing '%s' and '%s'." % ( meta.obj.filename, ebooks[ebook_index].obj.filename ) ) items.append( DataPair( metadata_file=meta.obj, ebook_file=ebooks[ebook_index].obj ) ) items.remove(meta.obj) items.remove(ebooks[ebook_index].obj) ebooks = ebooks[ebook_index+1:] return items
[ "def", "_isbn_pairing", "(", "items", ")", ":", "NameWrapper", "=", "namedtuple", "(", "\"NameWrapper\"", ",", "[", "\"name\"", ",", "\"obj\"", "]", ")", "metas", "=", "map", "(", "lambda", "x", ":", "NameWrapper", "(", "_just_name", "(", "x", ".", "filename", ")", ",", "x", ")", ",", "filter", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "MetadataFile", ")", ",", "items", ")", ")", "ebooks", "=", "map", "(", "lambda", "x", ":", "NameWrapper", "(", "_just_name", "(", "x", ".", "filename", ")", ",", "x", ")", ",", "filter", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "EbookFile", ")", ",", "items", ")", ")", "# simple pairing mechanism, which shouldn't be O^2 complex, but something", "# slightly better", "metas", "=", "sorted", "(", "metas", ",", "key", "=", "lambda", "x", ":", "x", ".", "name", ")", "ebooks", "=", "sorted", "(", "ebooks", ",", "key", "=", "lambda", "x", ":", "x", ".", "name", ",", "reverse", "=", "True", ")", "while", "metas", ":", "meta", "=", "metas", ".", "pop", "(", ")", "if", "not", "isbn_validator", ".", "is_valid_isbn", "(", "meta", ".", "name", ")", ":", "continue", "if", "not", "ebooks", ":", "break", "ebook_index", "=", "_index", "(", "ebooks", ",", "meta", ".", "name", ",", "key", "=", "lambda", "x", ":", "x", ".", "name", ")", "if", "ebook_index", ">=", "0", ":", "logger", ".", "debug", "(", "\"Pairing '%s' and '%s'.\"", "%", "(", "meta", ".", "obj", ".", "filename", ",", "ebooks", "[", "ebook_index", "]", ".", "obj", ".", "filename", ")", ")", "items", ".", "append", "(", "DataPair", "(", "metadata_file", "=", "meta", ".", "obj", ",", "ebook_file", "=", "ebooks", "[", "ebook_index", "]", ".", "obj", ")", ")", "items", ".", "remove", "(", "meta", ".", "obj", ")", "items", ".", "remove", "(", "ebooks", "[", "ebook_index", "]", ".", "obj", ")", "ebooks", "=", "ebooks", "[", "ebook_index", "+", "1", ":", "]", "return", "items" ]
Pair `items` with same ISBN into `DataPair` objects. Args: items (list): list of items, which will be searched. Returns: list: list with paired items. Paired items are removed, `DataPair` is \ added instead.
[ "Pair", "items", "with", "same", "ISBN", "into", "DataPair", "objects", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L269-L322
245,282
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_create_import_log
def _create_import_log(items): """ Used to create log with successfully imported data. """ log = [] for item in items: if isinstance(item, MetadataFile): log.append( "Metadata file '%s' successfully imported." % item.filename ) elif isinstance(item, EbookFile): log.append( "Ebook file '%s' successfully imported." % item.filename ) elif isinstance(item, DataPair): meta = item.metadata_file.filename data = item.ebook_file.filename log.extend([ "Metadata and data files paired to epub. import request:", "\tMetadata file '%s' successfully imported." % meta, "\tEbook file '%s' successfully imported." % data ]) return log
python
def _create_import_log(items): """ Used to create log with successfully imported data. """ log = [] for item in items: if isinstance(item, MetadataFile): log.append( "Metadata file '%s' successfully imported." % item.filename ) elif isinstance(item, EbookFile): log.append( "Ebook file '%s' successfully imported." % item.filename ) elif isinstance(item, DataPair): meta = item.metadata_file.filename data = item.ebook_file.filename log.extend([ "Metadata and data files paired to epub. import request:", "\tMetadata file '%s' successfully imported." % meta, "\tEbook file '%s' successfully imported." % data ]) return log
[ "def", "_create_import_log", "(", "items", ")", ":", "log", "=", "[", "]", "for", "item", "in", "items", ":", "if", "isinstance", "(", "item", ",", "MetadataFile", ")", ":", "log", ".", "append", "(", "\"Metadata file '%s' successfully imported.\"", "%", "item", ".", "filename", ")", "elif", "isinstance", "(", "item", ",", "EbookFile", ")", ":", "log", ".", "append", "(", "\"Ebook file '%s' successfully imported.\"", "%", "item", ".", "filename", ")", "elif", "isinstance", "(", "item", ",", "DataPair", ")", ":", "meta", "=", "item", ".", "metadata_file", ".", "filename", "data", "=", "item", ".", "ebook_file", ".", "filename", "log", ".", "extend", "(", "[", "\"Metadata and data files paired to epub. import request:\"", ",", "\"\\tMetadata file '%s' successfully imported.\"", "%", "meta", ",", "\"\\tEbook file '%s' successfully imported.\"", "%", "data", "]", ")", "return", "log" ]
Used to create log with successfully imported data.
[ "Used", "to", "create", "log", "with", "successfully", "imported", "data", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L325-L349
245,283
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
_process_items
def _process_items(items, user_conf, error_protocol): """ Parse metadata. Remove processed and sucessfully parsed items. Returns sucessfully processed items. """ def process_meta(item, error_protocol): try: return item._parse() except Exception, e: error_protocol.append( "Can't parse %s: %s" % (item._get_filenames()[0], e.message) ) if isinstance(item, DataPair): return item.ebook_file # process all items and put them to output queue out = [] for item in items: if isinstance(item, EbookFile): out.append(item) else: out.append(process_meta(item, error_protocol)) out = filter(lambda x: x, out) # remove None items (process_meta() fails) # remove processed files fn_pool = [] soon_removed = out if conf_merger(user_conf, "LEAVE_BAD_FILES") else items for item in soon_removed: fn_pool.extend(item._get_filenames()) _remove_files(fn_pool) return out
python
def _process_items(items, user_conf, error_protocol): """ Parse metadata. Remove processed and sucessfully parsed items. Returns sucessfully processed items. """ def process_meta(item, error_protocol): try: return item._parse() except Exception, e: error_protocol.append( "Can't parse %s: %s" % (item._get_filenames()[0], e.message) ) if isinstance(item, DataPair): return item.ebook_file # process all items and put them to output queue out = [] for item in items: if isinstance(item, EbookFile): out.append(item) else: out.append(process_meta(item, error_protocol)) out = filter(lambda x: x, out) # remove None items (process_meta() fails) # remove processed files fn_pool = [] soon_removed = out if conf_merger(user_conf, "LEAVE_BAD_FILES") else items for item in soon_removed: fn_pool.extend(item._get_filenames()) _remove_files(fn_pool) return out
[ "def", "_process_items", "(", "items", ",", "user_conf", ",", "error_protocol", ")", ":", "def", "process_meta", "(", "item", ",", "error_protocol", ")", ":", "try", ":", "return", "item", ".", "_parse", "(", ")", "except", "Exception", ",", "e", ":", "error_protocol", ".", "append", "(", "\"Can't parse %s: %s\"", "%", "(", "item", ".", "_get_filenames", "(", ")", "[", "0", "]", ",", "e", ".", "message", ")", ")", "if", "isinstance", "(", "item", ",", "DataPair", ")", ":", "return", "item", ".", "ebook_file", "# process all items and put them to output queue", "out", "=", "[", "]", "for", "item", "in", "items", ":", "if", "isinstance", "(", "item", ",", "EbookFile", ")", ":", "out", ".", "append", "(", "item", ")", "else", ":", "out", ".", "append", "(", "process_meta", "(", "item", ",", "error_protocol", ")", ")", "out", "=", "filter", "(", "lambda", "x", ":", "x", ",", "out", ")", "# remove None items (process_meta() fails)", "# remove processed files", "fn_pool", "=", "[", "]", "soon_removed", "=", "out", "if", "conf_merger", "(", "user_conf", ",", "\"LEAVE_BAD_FILES\"", ")", "else", "items", "for", "item", "in", "soon_removed", ":", "fn_pool", ".", "extend", "(", "item", ".", "_get_filenames", "(", ")", ")", "_remove_files", "(", "fn_pool", ")", "return", "out" ]
Parse metadata. Remove processed and sucessfully parsed items. Returns sucessfully processed items.
[ "Parse", "metadata", ".", "Remove", "processed", "and", "sucessfully", "parsed", "items", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L352-L386
245,284
maxfischer2781/chainlet
chainlet/driver.py
ChainDriver.start
def start(self, daemon=True): """ Start driving the chain asynchronously, return immediately :param daemon: ungracefully kill the driver when the program terminates :type daemon: bool """ if self._run_lock.acquire(False): try: # there is a short race window in which `start` release the lock, # but `run` has not picked it up yet, but the thread exists anyway if self._run_thread is None: self._run_thread = threading.Thread(target=self._run_in_thread) self._run_thread.daemon = daemon self._run_thread.start() finally: self._run_lock.release()
python
def start(self, daemon=True): """ Start driving the chain asynchronously, return immediately :param daemon: ungracefully kill the driver when the program terminates :type daemon: bool """ if self._run_lock.acquire(False): try: # there is a short race window in which `start` release the lock, # but `run` has not picked it up yet, but the thread exists anyway if self._run_thread is None: self._run_thread = threading.Thread(target=self._run_in_thread) self._run_thread.daemon = daemon self._run_thread.start() finally: self._run_lock.release()
[ "def", "start", "(", "self", ",", "daemon", "=", "True", ")", ":", "if", "self", ".", "_run_lock", ".", "acquire", "(", "False", ")", ":", "try", ":", "# there is a short race window in which `start` release the lock,", "# but `run` has not picked it up yet, but the thread exists anyway", "if", "self", ".", "_run_thread", "is", "None", ":", "self", ".", "_run_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_in_thread", ")", "self", ".", "_run_thread", ".", "daemon", "=", "daemon", "self", ".", "_run_thread", ".", "start", "(", ")", "finally", ":", "self", ".", "_run_lock", ".", "release", "(", ")" ]
Start driving the chain asynchronously, return immediately :param daemon: ungracefully kill the driver when the program terminates :type daemon: bool
[ "Start", "driving", "the", "chain", "asynchronously", "return", "immediately" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/driver.py#L27-L43
245,285
maxfischer2781/chainlet
chainlet/driver.py
ChainDriver.run
def run(self): """ Start driving the chain, block until done """ with self._run_lock: while self.mounts: for mount in self.mounts: try: next(mount) except StopIteration: self.mounts.remove(mount)
python
def run(self): """ Start driving the chain, block until done """ with self._run_lock: while self.mounts: for mount in self.mounts: try: next(mount) except StopIteration: self.mounts.remove(mount)
[ "def", "run", "(", "self", ")", ":", "with", "self", ".", "_run_lock", ":", "while", "self", ".", "mounts", ":", "for", "mount", "in", "self", ".", "mounts", ":", "try", ":", "next", "(", "mount", ")", "except", "StopIteration", ":", "self", ".", "mounts", ".", "remove", "(", "mount", ")" ]
Start driving the chain, block until done
[ "Start", "driving", "the", "chain", "block", "until", "done" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/driver.py#L51-L61
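run() drives every mount round-robin until each one raises StopIteration. Note that it removes items from the list it is iterating over, which can skip the following mount within that pass (the surrounding while loop picks it up again on the next pass); a standalone sketch of the same idea that iterates over a copy instead:

mounts = [iter(range(2)), iter('ab')]
while mounts:
    for m in list(mounts):          # copy, so removal does not skip neighbours
        try:
            print(next(m))
        except StopIteration:
            mounts.remove(m)
# prints 0, a, 1, b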
245,286
PyMLGame/pymlgame
pymlgame/surface.py
Surface.fill
def fill(self, color): """ Fill the whole screen with the given color. :param color: Color to use for filling :type color: tuple """ self.matrix = [[color for _ in range(self.height)] for _ in range(self.width)]
python
def fill(self, color): """ Fill the whole screen with the given color. :param color: Color to use for filling :type color: tuple """ self.matrix = [[color for _ in range(self.height)] for _ in range(self.width)]
[ "def", "fill", "(", "self", ",", "color", ")", ":", "self", ".", "matrix", "=", "[", "[", "color", "for", "_", "in", "range", "(", "self", ".", "height", ")", "]", "for", "_", "in", "range", "(", "self", ".", "width", ")", "]" ]
Fill the whole screen with the given color. :param color: Color to use for filling :type color: tuple
[ "Fill", "the", "whole", "screen", "with", "the", "given", "color", "." ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L25-L32
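A small end-to-end sketch for the Surface rows; the constructor signature Surface(width, height) is an assumption here (it is not shown in these rows), and colors are plain RGB tuples:

from pymlgame.surface import Surface

s = Surface(8, 4)        # assumed signature: width, height
s.fill((0, 0, 0))        # paint every cell black
s.matrix[0][0]           # (0, 0, 0); matrix is indexed [x][y]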
245,287
PyMLGame/pymlgame
pymlgame/surface.py
Surface.draw_dot
def draw_dot(self, pos, color): """ Draw one single dot with the given color on the screen. :param pos: Position of the dot :param color: COlor for the dot :type pos: tuple :type color: tuple """ if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height: self.matrix[pos[0]][pos[1]] = color
python
def draw_dot(self, pos, color): """ Draw one single dot with the given color on the screen. :param pos: Position of the dot :param color: COlor for the dot :type pos: tuple :type color: tuple """ if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height: self.matrix[pos[0]][pos[1]] = color
[ "def", "draw_dot", "(", "self", ",", "pos", ",", "color", ")", ":", "if", "0", "<=", "pos", "[", "0", "]", "<", "self", ".", "width", "and", "0", "<=", "pos", "[", "1", "]", "<", "self", ".", "height", ":", "self", ".", "matrix", "[", "pos", "[", "0", "]", "]", "[", "pos", "[", "1", "]", "]", "=", "color" ]
Draw one single dot with the given color on the screen. :param pos: Position of the dot :param color: COlor for the dot :type pos: tuple :type color: tuple
[ "Draw", "one", "single", "dot", "with", "the", "given", "color", "on", "the", "screen", "." ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L34-L44
245,288
PyMLGame/pymlgame
pymlgame/surface.py
Surface.draw_line
def draw_line(self, start, end, color): """ Draw a line with the given color on the screen. :param start: Start point of the line :param end: End point of the line :param color: Color of the line :type start: tuple :type end: tuple :type color: tuple """ def dist(p, a, b): return (abs((b[0] - a[0]) * (a[1] - p[1]) - (a[0] - p[0]) * (b[1] - a[1])) / math.sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2)) points = [] for x in range(min(start[0], end[0]), max(start[0], end[0]) + 1): for y in range(min(start[1], end[1]), max(start[1], end[1]) + 1): if dist((x, y), start, end) < 0.5: points.append((x, y)) for point in points: self.draw_dot(point, color)
python
def draw_line(self, start, end, color): """ Draw a line with the given color on the screen. :param start: Start point of the line :param end: End point of the line :param color: Color of the line :type start: tuple :type end: tuple :type color: tuple """ def dist(p, a, b): return (abs((b[0] - a[0]) * (a[1] - p[1]) - (a[0] - p[0]) * (b[1] - a[1])) / math.sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2)) points = [] for x in range(min(start[0], end[0]), max(start[0], end[0]) + 1): for y in range(min(start[1], end[1]), max(start[1], end[1]) + 1): if dist((x, y), start, end) < 0.5: points.append((x, y)) for point in points: self.draw_dot(point, color)
[ "def", "draw_line", "(", "self", ",", "start", ",", "end", ",", "color", ")", ":", "def", "dist", "(", "p", ",", "a", ",", "b", ")", ":", "return", "(", "abs", "(", "(", "b", "[", "0", "]", "-", "a", "[", "0", "]", ")", "*", "(", "a", "[", "1", "]", "-", "p", "[", "1", "]", ")", "-", "(", "a", "[", "0", "]", "-", "p", "[", "0", "]", ")", "*", "(", "b", "[", "1", "]", "-", "a", "[", "1", "]", ")", ")", "/", "math", ".", "sqrt", "(", "(", "b", "[", "0", "]", "-", "a", "[", "0", "]", ")", "**", "2", "+", "(", "b", "[", "1", "]", "-", "a", "[", "1", "]", ")", "**", "2", ")", ")", "points", "=", "[", "]", "for", "x", "in", "range", "(", "min", "(", "start", "[", "0", "]", ",", "end", "[", "0", "]", ")", ",", "max", "(", "start", "[", "0", "]", ",", "end", "[", "0", "]", ")", "+", "1", ")", ":", "for", "y", "in", "range", "(", "min", "(", "start", "[", "1", "]", ",", "end", "[", "1", "]", ")", ",", "max", "(", "start", "[", "1", "]", ",", "end", "[", "1", "]", ")", "+", "1", ")", ":", "if", "dist", "(", "(", "x", ",", "y", ")", ",", "start", ",", "end", ")", "<", "0.5", ":", "points", ".", "append", "(", "(", "x", ",", "y", ")", ")", "for", "point", "in", "points", ":", "self", ".", "draw_dot", "(", "point", ",", "color", ")" ]
Draw a line with the given color on the screen. :param start: Start point of the line :param end: End point of the line :param color: Color of the line :type start: tuple :type end: tuple :type color: tuple
[ "Draw", "a", "line", "with", "the", "given", "color", "on", "the", "screen", "." ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L46-L67
245,289
PyMLGame/pymlgame
pymlgame/surface.py
Surface.draw_rect
def draw_rect(self, pos, size, color, fillcolor=None): """ Draw a rectangle with the given color on the screen and optionally fill it with fillcolor. :param pos: Top left corner of the rectangle :param size: Sieze of the rectangle :param color: Color for borders :param fillcolor: Color for infill :type pos: tuple :type size: tuple :type color: tuple :type fillcolor: tuple """ # draw top and botton line for x in range(size[0]): self.draw_dot((pos[0] + x, pos[1]), color) self.draw_dot((pos[0] + x, pos[1] + size[1] - 1), color) # draw left and right side for y in range(size[1]): self.draw_dot((pos[0], pos[1] + y), color) self.draw_dot((pos[0] + size[0] - 1, pos[1] + y), color) # draw filled rect if fillcolor and size[0] >= 3 and size[1] >= 3: for x in range(size[0] - 2): for y in range(size[1] - 2): self.draw_dot((pos[0] + 1 + x, pos[1] + 1 + y), fillcolor)
python
def draw_rect(self, pos, size, color, fillcolor=None): """ Draw a rectangle with the given color on the screen and optionally fill it with fillcolor. :param pos: Top left corner of the rectangle :param size: Sieze of the rectangle :param color: Color for borders :param fillcolor: Color for infill :type pos: tuple :type size: tuple :type color: tuple :type fillcolor: tuple """ # draw top and botton line for x in range(size[0]): self.draw_dot((pos[0] + x, pos[1]), color) self.draw_dot((pos[0] + x, pos[1] + size[1] - 1), color) # draw left and right side for y in range(size[1]): self.draw_dot((pos[0], pos[1] + y), color) self.draw_dot((pos[0] + size[0] - 1, pos[1] + y), color) # draw filled rect if fillcolor and size[0] >= 3 and size[1] >= 3: for x in range(size[0] - 2): for y in range(size[1] - 2): self.draw_dot((pos[0] + 1 + x, pos[1] + 1 + y), fillcolor)
[ "def", "draw_rect", "(", "self", ",", "pos", ",", "size", ",", "color", ",", "fillcolor", "=", "None", ")", ":", "# draw top and botton line", "for", "x", "in", "range", "(", "size", "[", "0", "]", ")", ":", "self", ".", "draw_dot", "(", "(", "pos", "[", "0", "]", "+", "x", ",", "pos", "[", "1", "]", ")", ",", "color", ")", "self", ".", "draw_dot", "(", "(", "pos", "[", "0", "]", "+", "x", ",", "pos", "[", "1", "]", "+", "size", "[", "1", "]", "-", "1", ")", ",", "color", ")", "# draw left and right side", "for", "y", "in", "range", "(", "size", "[", "1", "]", ")", ":", "self", ".", "draw_dot", "(", "(", "pos", "[", "0", "]", ",", "pos", "[", "1", "]", "+", "y", ")", ",", "color", ")", "self", ".", "draw_dot", "(", "(", "pos", "[", "0", "]", "+", "size", "[", "0", "]", "-", "1", ",", "pos", "[", "1", "]", "+", "y", ")", ",", "color", ")", "# draw filled rect", "if", "fillcolor", "and", "size", "[", "0", "]", ">=", "3", "and", "size", "[", "1", "]", ">=", "3", ":", "for", "x", "in", "range", "(", "size", "[", "0", "]", "-", "2", ")", ":", "for", "y", "in", "range", "(", "size", "[", "1", "]", "-", "2", ")", ":", "self", ".", "draw_dot", "(", "(", "pos", "[", "0", "]", "+", "1", "+", "x", ",", "pos", "[", "1", "]", "+", "1", "+", "y", ")", ",", "fillcolor", ")" ]
Draw a rectangle with the given color on the screen and optionally fill it with fillcolor. :param pos: Top left corner of the rectangle :param size: Sieze of the rectangle :param color: Color for borders :param fillcolor: Color for infill :type pos: tuple :type size: tuple :type color: tuple :type fillcolor: tuple
[ "Draw", "a", "rectangle", "with", "the", "given", "color", "on", "the", "screen", "and", "optionally", "fill", "it", "with", "fillcolor", "." ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L69-L94
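draw_rect() draws the one-pixel border first and only fills the interior when both sides are at least 3; continuing with the same assumed Surface(width, height) constructor:

from pymlgame.surface import Surface

s = Surface(8, 4)
s.fill((0, 0, 0))
s.draw_rect((1, 1), (4, 3), color=(255, 0, 0), fillcolor=(0, 0, 255))
s.matrix[1][1]           # (255, 0, 0), border
s.matrix[2][2]           # (0, 0, 255), infill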
245,290
PyMLGame/pymlgame
pymlgame/surface.py
Surface.draw_circle
def draw_circle(self, pos, radius, color, fillcolor=None): """ Draw a circle with the given color on the screen and optionally fill it with fillcolor. :param pos: Center of the circle :param radius: Radius :param color: Color for border :param fillcolor: Color for infill :type pos: tuple :type radius: int :type color: tuple :type fillcolor: tuple """ #TODO: This still produces rubbish but it's on a good way to success def dist(d, p, r): return abs(math.sqrt((p[0] - d[0])**2 + (p[1] - d[1])**2) - r) points = [] for x in range(pos[0] - radius, pos[0] + radius): for y in range(pos[1] - radius, pos[1] + radius): if 0 < x < self.width and 0 < y < self.height: if dist((x, y), pos, radius) < 1.3: points.append((x, y)) # draw fill color if fillcolor: for point in points: pass # draw outline for point in points: self.draw_dot(point, color)
python
def draw_circle(self, pos, radius, color, fillcolor=None): """ Draw a circle with the given color on the screen and optionally fill it with fillcolor. :param pos: Center of the circle :param radius: Radius :param color: Color for border :param fillcolor: Color for infill :type pos: tuple :type radius: int :type color: tuple :type fillcolor: tuple """ #TODO: This still produces rubbish but it's on a good way to success def dist(d, p, r): return abs(math.sqrt((p[0] - d[0])**2 + (p[1] - d[1])**2) - r) points = [] for x in range(pos[0] - radius, pos[0] + radius): for y in range(pos[1] - radius, pos[1] + radius): if 0 < x < self.width and 0 < y < self.height: if dist((x, y), pos, radius) < 1.3: points.append((x, y)) # draw fill color if fillcolor: for point in points: pass # draw outline for point in points: self.draw_dot(point, color)
[ "def", "draw_circle", "(", "self", ",", "pos", ",", "radius", ",", "color", ",", "fillcolor", "=", "None", ")", ":", "#TODO: This still produces rubbish but it's on a good way to success", "def", "dist", "(", "d", ",", "p", ",", "r", ")", ":", "return", "abs", "(", "math", ".", "sqrt", "(", "(", "p", "[", "0", "]", "-", "d", "[", "0", "]", ")", "**", "2", "+", "(", "p", "[", "1", "]", "-", "d", "[", "1", "]", ")", "**", "2", ")", "-", "r", ")", "points", "=", "[", "]", "for", "x", "in", "range", "(", "pos", "[", "0", "]", "-", "radius", ",", "pos", "[", "0", "]", "+", "radius", ")", ":", "for", "y", "in", "range", "(", "pos", "[", "1", "]", "-", "radius", ",", "pos", "[", "1", "]", "+", "radius", ")", ":", "if", "0", "<", "x", "<", "self", ".", "width", "and", "0", "<", "y", "<", "self", ".", "height", ":", "if", "dist", "(", "(", "x", ",", "y", ")", ",", "pos", ",", "radius", ")", "<", "1.3", ":", "points", ".", "append", "(", "(", "x", ",", "y", ")", ")", "# draw fill color", "if", "fillcolor", ":", "for", "point", "in", "points", ":", "pass", "# draw outline", "for", "point", "in", "points", ":", "self", ".", "draw_dot", "(", "point", ",", "color", ")" ]
Draw a circle with the given color on the screen and optionally fill it with fillcolor. :param pos: Center of the circle :param radius: Radius :param color: Color for border :param fillcolor: Color for infill :type pos: tuple :type radius: int :type color: tuple :type fillcolor: tuple
[ "Draw", "a", "circle", "with", "the", "given", "color", "on", "the", "screen", "and", "optionally", "fill", "it", "with", "fillcolor", "." ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L96-L126
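draw_circle collects the points whose distance to the centre is within 1.3 of the radius, which its own TODO admits is still rough. A small self-contained version of the same distance test follows; the bounds here include row and column 0 and the +radius edge, unlike the record's strict checks, which is an assumption for the sketch rather than a fix taken from the library:

import math

# Self-contained version of the distance-threshold test used in draw_circle.
# width/height stand in for the Surface dimensions; the 1.3 tolerance is the
# same magic number as in the record and only approximates a one-pixel ring.
def circle_points(pos, radius, width, height, tolerance=1.3):
    cx, cy = pos
    points = []
    for x in range(cx - radius, cx + radius + 1):
        for y in range(cy - radius, cy + radius + 1):
            if 0 <= x < width and 0 <= y < height:
                if abs(math.hypot(x - cx, y - cy) - radius) < tolerance:
                    points.append((x, y))
    return points

# Example: outline of a radius-3 circle on a 10x10 grid.
print(circle_points((5, 5), 3, 10, 10))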
245,291
PyMLGame/pymlgame
pymlgame/surface.py
Surface.blit
def blit(self, surface, pos=(0, 0)): """ Blits a surface on this surface at pos :param surface: Surface to blit :param pos: Top left point to start blitting :type surface: Surface :type pos: tuple """ for x in range(surface.width): for y in range(surface.height): px = x + pos[0] py = y + pos[1] if 0 < px < self.width and 0 < py < self.height: self.matrix[px][py] = surface.matrix[x][y]
python
def blit(self, surface, pos=(0, 0)): """ Blits a surface on this surface at pos :param surface: Surface to blit :param pos: Top left point to start blitting :type surface: Surface :type pos: tuple """ for x in range(surface.width): for y in range(surface.height): px = x + pos[0] py = y + pos[1] if 0 < px < self.width and 0 < py < self.height: self.matrix[px][py] = surface.matrix[x][y]
[ "def", "blit", "(", "self", ",", "surface", ",", "pos", "=", "(", "0", ",", "0", ")", ")", ":", "for", "x", "in", "range", "(", "surface", ".", "width", ")", ":", "for", "y", "in", "range", "(", "surface", ".", "height", ")", ":", "px", "=", "x", "+", "pos", "[", "0", "]", "py", "=", "y", "+", "pos", "[", "1", "]", "if", "0", "<", "px", "<", "self", ".", "width", "and", "0", "<", "py", "<", "self", ".", "height", ":", "self", ".", "matrix", "[", "px", "]", "[", "py", "]", "=", "surface", ".", "matrix", "[", "x", "]", "[", "y", "]" ]
Blits a surface on this surface at pos :param surface: Surface to blit :param pos: Top left point to start blitting :type surface: Surface :type pos: tuple
[ "Blits", "a", "surface", "on", "this", "surface", "at", "pos" ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L128-L142
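blit copies pixel by pixel and guards with 0 < px < self.width, which silently skips row and column 0. A sketch of the same copy over plain nested lists; making the lower bound inclusive is an assumption of the sketch, not a change taken from pymlgame:

# Sketch of blitting one pixel matrix onto another. Plain nested lists stand in
# for Surface.matrix (indexed [x][y], i.e. a list of columns).
def blit_sketch(dst, src, pos=(0, 0)):
    dst_w, dst_h = len(dst), len(dst[0])
    src_w, src_h = len(src), len(src[0])
    for x in range(src_w):
        for y in range(src_h):
            px, py = x + pos[0], y + pos[1]
            if 0 <= px < dst_w and 0 <= py < dst_h:   # inclusive lower bound
                dst[px][py] = src[x][y]

screen = [[(0, 0, 0)] * 4 for _ in range(4)]
sprite = [[(255, 0, 0)] * 2 for _ in range(2)]
blit_sketch(screen, sprite, (1, 1))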
245,292
PyMLGame/pymlgame
pymlgame/surface.py
Surface.replace_color
def replace_color(self, before, after): """ Replaces a color on a surface with another one. :param before: Change all pixels with this color :param after: To that color :type before: tuple :type after: tuple """ #TODO: find out if this actually works #((self.matrix[x][y] = after for y in range(self.height) if self.matrix[x][y] == before) for x in range(self.width)) for x in range(self.width): for y in range(self.height): if self.matrix[x][y] == before: self.matrix[x][y] = after
python
def replace_color(self, before, after): """ Replaces a color on a surface with another one. :param before: Change all pixels with this color :param after: To that color :type before: tuple :type after: tuple """ #TODO: find out if this actually works #((self.matrix[x][y] = after for y in range(self.height) if self.matrix[x][y] == before) for x in range(self.width)) for x in range(self.width): for y in range(self.height): if self.matrix[x][y] == before: self.matrix[x][y] = after
[ "def", "replace_color", "(", "self", ",", "before", ",", "after", ")", ":", "#TODO: find out if this actually works", "#((self.matrix[x][y] = after for y in range(self.height) if self.matrix[x][y] == before) for x in range(self.width))", "for", "x", "in", "range", "(", "self", ".", "width", ")", ":", "for", "y", "in", "range", "(", "self", ".", "height", ")", ":", "if", "self", ".", "matrix", "[", "x", "]", "[", "y", "]", "==", "before", ":", "self", ".", "matrix", "[", "x", "]", "[", "y", "]", "=", "after" ]
Replaces a color on a surface with another one. :param before: Change all pixels with this color :param after: To that color :type before: tuple :type after: tuple
[ "Replaces", "a", "color", "on", "a", "surface", "with", "another", "one", "." ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/surface.py#L144-L158
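The commented-out one-liner in replace_color cannot work as written, because Python comprehensions cannot contain assignments; a comprehension can only build a new matrix. A short illustrative sketch of that variant, with a plain nested list standing in for Surface.matrix:

# Comprehension-based variant of replace_color: returns a new matrix instead of
# mutating in place, since assignment inside a comprehension is not valid Python.
def replaced(matrix, before, after):
    return [[after if pixel == before else pixel for pixel in column]
            for column in matrix]

matrix = [[(0, 0, 0), (255, 0, 0)], [(255, 0, 0), (0, 255, 0)]]
print(replaced(matrix, (255, 0, 0), (0, 0, 255)))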
245,293
matthewdeanmartin/jiggle_version
build.py
detect_secrets
def detect_secrets(): """ Call detect-secrets tool """ # use # blah blah = "foo" # pragma: whitelist secret # to ignore a false posites errors_file = "detect-secrets-results.txt" print(execute_get_text("pwd")) command = "{0} detect-secrets --scan --base64-limit 4 --exclude .idea|.js|.min.js|.html|.xsd|" \ "lock.json|synced_folders|.scss|Pipfile.lock|" \ "lint.txt|{1}".format(PIPENV, errors_file).strip() print(command) bash_process = subprocess.Popen(command.split(" "), #shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) foo = bash_process.wait() out, err = bash_process.communicate() # wait with open(errors_file, "w+") as file_handle: if len(out)==0: print("Warning- no output from detect secrets. Happens with git hook, but not from ordinary command line.") return file_handle.write(out.decode()) with open(errors_file) as f: try: data = json.load(f) except Exception: print("Can't read json") exit(-1) return if data["results"]: for result in data["results"]: print(result) print("detect-secrets has discovered high entropy strings, possibly passwords?") exit(-1)
python
def detect_secrets(): """ Call detect-secrets tool """ # use # blah blah = "foo" # pragma: whitelist secret # to ignore a false posites errors_file = "detect-secrets-results.txt" print(execute_get_text("pwd")) command = "{0} detect-secrets --scan --base64-limit 4 --exclude .idea|.js|.min.js|.html|.xsd|" \ "lock.json|synced_folders|.scss|Pipfile.lock|" \ "lint.txt|{1}".format(PIPENV, errors_file).strip() print(command) bash_process = subprocess.Popen(command.split(" "), #shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) foo = bash_process.wait() out, err = bash_process.communicate() # wait with open(errors_file, "w+") as file_handle: if len(out)==0: print("Warning- no output from detect secrets. Happens with git hook, but not from ordinary command line.") return file_handle.write(out.decode()) with open(errors_file) as f: try: data = json.load(f) except Exception: print("Can't read json") exit(-1) return if data["results"]: for result in data["results"]: print(result) print("detect-secrets has discovered high entropy strings, possibly passwords?") exit(-1)
[ "def", "detect_secrets", "(", ")", ":", "# use", "# blah blah = \"foo\" # pragma: whitelist secret", "# to ignore a false posites", "errors_file", "=", "\"detect-secrets-results.txt\"", "print", "(", "execute_get_text", "(", "\"pwd\"", ")", ")", "command", "=", "\"{0} detect-secrets --scan --base64-limit 4 --exclude .idea|.js|.min.js|.html|.xsd|\"", "\"lock.json|synced_folders|.scss|Pipfile.lock|\"", "\"lint.txt|{1}\"", ".", "format", "(", "PIPENV", ",", "errors_file", ")", ".", "strip", "(", ")", "print", "(", "command", ")", "bash_process", "=", "subprocess", ".", "Popen", "(", "command", ".", "split", "(", "\" \"", ")", ",", "#shell=True,", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "foo", "=", "bash_process", ".", "wait", "(", ")", "out", ",", "err", "=", "bash_process", ".", "communicate", "(", ")", "# wait", "with", "open", "(", "errors_file", ",", "\"w+\"", ")", "as", "file_handle", ":", "if", "len", "(", "out", ")", "==", "0", ":", "print", "(", "\"Warning- no output from detect secrets. Happens with git hook, but not from ordinary command line.\"", ")", "return", "file_handle", ".", "write", "(", "out", ".", "decode", "(", ")", ")", "with", "open", "(", "errors_file", ")", "as", "f", ":", "try", ":", "data", "=", "json", ".", "load", "(", "f", ")", "except", "Exception", ":", "print", "(", "\"Can't read json\"", ")", "exit", "(", "-", "1", ")", "return", "if", "data", "[", "\"results\"", "]", ":", "for", "result", "in", "data", "[", "\"results\"", "]", ":", "print", "(", "result", ")", "print", "(", "\"detect-secrets has discovered high entropy strings, possibly passwords?\"", ")", "exit", "(", "-", "1", ")" ]
Call detect-secrets tool
[ "Call", "detect", "-", "secrets", "tool" ]
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/build.py#L138-L182
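detect_secrets shells out with Popen, writes stdout to a results file, then re-reads that file as JSON. A shorter sketch of the same run-and-parse step using subprocess.run; the command string is whatever the caller builds (for example the pipenv-prefixed detect-secrets --scan line from the record), and skipping the intermediate file is an assumption of the sketch:

import json
import subprocess
import sys

# Run a scanner command, parse its stdout as JSON, and fail if it reports findings.
def run_scan(command):
    result = subprocess.run(command.split(" "), capture_output=True, text=True)
    if not result.stdout:
        print("Warning - no output from the scanner.")
        return
    report = json.loads(result.stdout)
    for finding in report.get("results", {}):   # detect-secrets keys results by file
        print(finding)
    if report.get("results"):
        sys.exit(-1)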
245,294
matthewdeanmartin/jiggle_version
build.py
mypy
def mypy(): """ Are types ok? """ if sys.version_info < (3, 4): print("Mypy doesn't work on python < 3.4") return if IS_TRAVIS: command = "{0} -m mypy {1} --ignore-missing-imports --strict".format(PYTHON, PROJECT_NAME).strip() else: command = "{0} mypy {1} --ignore-missing-imports --strict".format(PIPENV, PROJECT_NAME).strip() bash_process = subprocess.Popen(command.split(" "), # shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = bash_process.communicate() # wait mypy_file = "mypy_errors.txt" with open(mypy_file, "w+") as lint_file: lines = out.decode().split("\n") for line in lines: if "build_utils.py" in line: continue if "test.py" in line: continue if "tests.py" in line: continue if "/test_" in line: continue if "/tests_" in line: continue else: lint_file.writelines([line + "\n"]) num_lines = sum(1 for line in open(mypy_file) if line and line.strip(" \n")) max_lines = 25 if num_lines > max_lines: raise TypeError("Too many lines of mypy : {0}, max {1}".format(num_lines, max_lines))
python
def mypy(): """ Are types ok? """ if sys.version_info < (3, 4): print("Mypy doesn't work on python < 3.4") return if IS_TRAVIS: command = "{0} -m mypy {1} --ignore-missing-imports --strict".format(PYTHON, PROJECT_NAME).strip() else: command = "{0} mypy {1} --ignore-missing-imports --strict".format(PIPENV, PROJECT_NAME).strip() bash_process = subprocess.Popen(command.split(" "), # shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = bash_process.communicate() # wait mypy_file = "mypy_errors.txt" with open(mypy_file, "w+") as lint_file: lines = out.decode().split("\n") for line in lines: if "build_utils.py" in line: continue if "test.py" in line: continue if "tests.py" in line: continue if "/test_" in line: continue if "/tests_" in line: continue else: lint_file.writelines([line + "\n"]) num_lines = sum(1 for line in open(mypy_file) if line and line.strip(" \n")) max_lines = 25 if num_lines > max_lines: raise TypeError("Too many lines of mypy : {0}, max {1}".format(num_lines, max_lines))
[ "def", "mypy", "(", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "4", ")", ":", "print", "(", "\"Mypy doesn't work on python < 3.4\"", ")", "return", "if", "IS_TRAVIS", ":", "command", "=", "\"{0} -m mypy {1} --ignore-missing-imports --strict\"", ".", "format", "(", "PYTHON", ",", "PROJECT_NAME", ")", ".", "strip", "(", ")", "else", ":", "command", "=", "\"{0} mypy {1} --ignore-missing-imports --strict\"", ".", "format", "(", "PIPENV", ",", "PROJECT_NAME", ")", ".", "strip", "(", ")", "bash_process", "=", "subprocess", ".", "Popen", "(", "command", ".", "split", "(", "\" \"", ")", ",", "# shell=True,", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "bash_process", ".", "communicate", "(", ")", "# wait", "mypy_file", "=", "\"mypy_errors.txt\"", "with", "open", "(", "mypy_file", ",", "\"w+\"", ")", "as", "lint_file", ":", "lines", "=", "out", ".", "decode", "(", ")", ".", "split", "(", "\"\\n\"", ")", "for", "line", "in", "lines", ":", "if", "\"build_utils.py\"", "in", "line", ":", "continue", "if", "\"test.py\"", "in", "line", ":", "continue", "if", "\"tests.py\"", "in", "line", ":", "continue", "if", "\"/test_\"", "in", "line", ":", "continue", "if", "\"/tests_\"", "in", "line", ":", "continue", "else", ":", "lint_file", ".", "writelines", "(", "[", "line", "+", "\"\\n\"", "]", ")", "num_lines", "=", "sum", "(", "1", "for", "line", "in", "open", "(", "mypy_file", ")", "if", "line", "and", "line", ".", "strip", "(", "\" \\n\"", ")", ")", "max_lines", "=", "25", "if", "num_lines", ">", "max_lines", ":", "raise", "TypeError", "(", "\"Too many lines of mypy : {0}, max {1}\"", ".", "format", "(", "num_lines", ",", "max_lines", ")", ")" ]
Are types ok?
[ "Are", "types", "ok?" ]
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/build.py#L336-L374
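mypy() counts the remaining lines with a bare open() inside a generator expression, leaving the file handle to the garbage collector, and filters test files line by line. A sketch of the same filter-and-count step with explicit context managers; the skip list and the 25-line threshold mirror the record, while the paths are placeholders:

# Filter mypy output, write the kept lines to a report file, and enforce a cap.
SKIP = ("build_utils.py", "test.py", "tests.py", "/test_", "/tests_")
MAX_LINES = 25

def count_relevant(raw_output, report_path="mypy_errors.txt"):
    kept = [line for line in raw_output.split("\n")
            if line.strip() and not any(token in line for token in SKIP)]
    with open(report_path, "w") as report:        # handle is closed deterministically
        report.write("\n".join(kept) + "\n")
    if len(kept) > MAX_LINES:
        raise TypeError("Too many lines of mypy : {0}, max {1}".format(len(kept), MAX_LINES))
    return len(kept)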
245,295
matthewdeanmartin/jiggle_version
build.py
gemfury
def gemfury(): """ Push to gem fury, a repo with private options """ # fury login # fury push dist/*.gz --as=YOUR_ACCT # fury push dist/*.whl --as=YOUR_ACCT cp = subprocess.run(("fury login --as={0}".format(GEM_FURY).split(" ")), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, check=True) print(cp.stdout) about = {} with open(os.path.join(SRC, PROJECT_NAME, "__version__.py")) as f: exec(f.read(), about) version = Version(about["__version__"]) print("Have version : " + str(version)) print("Preparing to upload") if version not in get_versions(): for kind in ["gz", "whl"]: try: files = glob.glob("{0}dist/*.{1}".format(SRC.replace(".", ""), kind)) for file_name in files: cp = subprocess.run(("fury push {0} --as={1}".format(file_name, GEM_FURY).split(" ")), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, check=True) print("result of fury push") for stream in [cp.stdout, cp.stderr]: if stream: for line in stream.decode().split("\n"): print(line) except subprocess.CalledProcessError as cpe: print("result of fury push- got error") for stream in [cp.stdout, cp.stderr]: if stream: for line in stream.decode().split("\n"): print(line) print(cpe) raise
python
def gemfury(): """ Push to gem fury, a repo with private options """ # fury login # fury push dist/*.gz --as=YOUR_ACCT # fury push dist/*.whl --as=YOUR_ACCT cp = subprocess.run(("fury login --as={0}".format(GEM_FURY).split(" ")), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, check=True) print(cp.stdout) about = {} with open(os.path.join(SRC, PROJECT_NAME, "__version__.py")) as f: exec(f.read(), about) version = Version(about["__version__"]) print("Have version : " + str(version)) print("Preparing to upload") if version not in get_versions(): for kind in ["gz", "whl"]: try: files = glob.glob("{0}dist/*.{1}".format(SRC.replace(".", ""), kind)) for file_name in files: cp = subprocess.run(("fury push {0} --as={1}".format(file_name, GEM_FURY).split(" ")), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, check=True) print("result of fury push") for stream in [cp.stdout, cp.stderr]: if stream: for line in stream.decode().split("\n"): print(line) except subprocess.CalledProcessError as cpe: print("result of fury push- got error") for stream in [cp.stdout, cp.stderr]: if stream: for line in stream.decode().split("\n"): print(line) print(cpe) raise
[ "def", "gemfury", "(", ")", ":", "# fury login", "# fury push dist/*.gz --as=YOUR_ACCT", "# fury push dist/*.whl --as=YOUR_ACCT", "cp", "=", "subprocess", ".", "run", "(", "(", "\"fury login --as={0}\"", ".", "format", "(", "GEM_FURY", ")", ".", "split", "(", "\" \"", ")", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ",", "check", "=", "True", ")", "print", "(", "cp", ".", "stdout", ")", "about", "=", "{", "}", "with", "open", "(", "os", ".", "path", ".", "join", "(", "SRC", ",", "PROJECT_NAME", ",", "\"__version__.py\"", ")", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "(", ")", ",", "about", ")", "version", "=", "Version", "(", "about", "[", "\"__version__\"", "]", ")", "print", "(", "\"Have version : \"", "+", "str", "(", "version", ")", ")", "print", "(", "\"Preparing to upload\"", ")", "if", "version", "not", "in", "get_versions", "(", ")", ":", "for", "kind", "in", "[", "\"gz\"", ",", "\"whl\"", "]", ":", "try", ":", "files", "=", "glob", ".", "glob", "(", "\"{0}dist/*.{1}\"", ".", "format", "(", "SRC", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ",", "kind", ")", ")", "for", "file_name", "in", "files", ":", "cp", "=", "subprocess", ".", "run", "(", "(", "\"fury push {0} --as={1}\"", ".", "format", "(", "file_name", ",", "GEM_FURY", ")", ".", "split", "(", "\" \"", ")", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ",", "check", "=", "True", ")", "print", "(", "\"result of fury push\"", ")", "for", "stream", "in", "[", "cp", ".", "stdout", ",", "cp", ".", "stderr", "]", ":", "if", "stream", ":", "for", "line", "in", "stream", ".", "decode", "(", ")", ".", "split", "(", "\"\\n\"", ")", ":", "print", "(", "line", ")", "except", "subprocess", ".", "CalledProcessError", "as", "cpe", ":", "print", "(", "\"result of fury push- got error\"", ")", "for", "stream", "in", "[", "cp", ".", "stdout", ",", "cp", ".", "stderr", "]", ":", "if", "stream", ":", "for", "line", "in", "stream", ".", "decode", "(", ")", ".", "split", "(", "\"\\n\"", ")", ":", "print", "(", "line", ")", "print", "(", "cpe", ")", "raise" ]
Push to gem fury, a repo with private options
[ "Push", "to", "gem", "fury", "a", "repo", "with", "private", "options" ]
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/build.py#L449-L490
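gemfury() reads the package version by exec-ing __version__.py into a dict and wrapping the result in Version. A sketch of just that step; the record never shows where Version is imported from, so taking it from the third-party packaging library is a guess, and the src/project_name arguments are placeholders for the build globals:

import os
from packaging.version import Version   # assumed source of Version, not shown in the record

# Read __version__ from a package's __version__.py and return it as a Version object.
def read_version(src, project_name):
    about = {}
    with open(os.path.join(src, project_name, "__version__.py")) as handle:
        exec(handle.read(), about)       # the file is expected to define __version__
    return Version(about["__version__"])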
245,296
ronaldguillen/wave
wave/utils/field_mapping.py
needs_label
def needs_label(model_field, field_name): """ Returns `True` if the label based on the model's verbose name is not equal to the default label it would have based on it's field name. """ default_label = field_name.replace('_', ' ').capitalize() return capfirst(model_field.verbose_name) != default_label
python
def needs_label(model_field, field_name): """ Returns `True` if the label based on the model's verbose name is not equal to the default label it would have based on it's field name. """ default_label = field_name.replace('_', ' ').capitalize() return capfirst(model_field.verbose_name) != default_label
[ "def", "needs_label", "(", "model_field", ",", "field_name", ")", ":", "default_label", "=", "field_name", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "capitalize", "(", ")", "return", "capfirst", "(", "model_field", ".", "verbose_name", ")", "!=", "default_label" ]
Returns `True` if the label based on the model's verbose name is not equal to the default label it would have based on it's field name.
[ "Returns", "True", "if", "the", "label", "based", "on", "the", "model", "s", "verbose", "name", "is", "not", "equal", "to", "the", "default", "label", "it", "would", "have", "based", "on", "it", "s", "field", "name", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/field_mapping.py#L46-L52
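needs_label compares the model field's verbose_name against the label that would be derived from the field name anyway. A standalone illustration of that derived-label rule using plain strings; Django's capfirst is skipped here because the example verbose names already start with a capital, so the library itself is not imported:

# The default label is the field name with underscores turned into spaces and
# the first word capitalised; an explicit label kwarg is only needed when the
# verbose name differs from that.
def default_label(field_name):
    return field_name.replace('_', ' ').capitalize()

print(default_label("first_name"))                     # "First name"
print(default_label("first_name") != "First name")     # False -> no explicit label needed
print(default_label("first_name") != "Given name")     # True  -> label kwarg is needed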
245,297
ronaldguillen/wave
wave/utils/field_mapping.py
get_relation_kwargs
def get_relation_kwargs(field_name, relation_info): """ Creates a default instance of a flat relational field. """ model_field, related_model, to_many, to_field, has_through_model = relation_info kwargs = { 'queryset': related_model._default_manager, 'view_name': get_detail_view_name(related_model) } if to_many: kwargs['many'] = True if to_field: kwargs['to_field'] = to_field if has_through_model: kwargs['read_only'] = True kwargs.pop('queryset', None) if model_field: if model_field.verbose_name and needs_label(model_field, field_name): kwargs['label'] = capfirst(model_field.verbose_name) help_text = model_field.help_text if help_text: kwargs['help_text'] = help_text if not model_field.editable: kwargs['read_only'] = True kwargs.pop('queryset', None) if kwargs.get('read_only', False): # If this field is read-only, then return early. # No further keyword arguments are valid. return kwargs if model_field.has_default() or model_field.blank or model_field.null: kwargs['required'] = False if model_field.null: kwargs['allow_null'] = True if model_field.validators: kwargs['validators'] = model_field.validators if getattr(model_field, 'unique', False): validator = UniqueValidator(queryset=model_field.model._default_manager) kwargs['validators'] = kwargs.get('validators', []) + [validator] if to_many and not model_field.blank: kwargs['allow_empty'] = False return kwargs
python
def get_relation_kwargs(field_name, relation_info): """ Creates a default instance of a flat relational field. """ model_field, related_model, to_many, to_field, has_through_model = relation_info kwargs = { 'queryset': related_model._default_manager, 'view_name': get_detail_view_name(related_model) } if to_many: kwargs['many'] = True if to_field: kwargs['to_field'] = to_field if has_through_model: kwargs['read_only'] = True kwargs.pop('queryset', None) if model_field: if model_field.verbose_name and needs_label(model_field, field_name): kwargs['label'] = capfirst(model_field.verbose_name) help_text = model_field.help_text if help_text: kwargs['help_text'] = help_text if not model_field.editable: kwargs['read_only'] = True kwargs.pop('queryset', None) if kwargs.get('read_only', False): # If this field is read-only, then return early. # No further keyword arguments are valid. return kwargs if model_field.has_default() or model_field.blank or model_field.null: kwargs['required'] = False if model_field.null: kwargs['allow_null'] = True if model_field.validators: kwargs['validators'] = model_field.validators if getattr(model_field, 'unique', False): validator = UniqueValidator(queryset=model_field.model._default_manager) kwargs['validators'] = kwargs.get('validators', []) + [validator] if to_many and not model_field.blank: kwargs['allow_empty'] = False return kwargs
[ "def", "get_relation_kwargs", "(", "field_name", ",", "relation_info", ")", ":", "model_field", ",", "related_model", ",", "to_many", ",", "to_field", ",", "has_through_model", "=", "relation_info", "kwargs", "=", "{", "'queryset'", ":", "related_model", ".", "_default_manager", ",", "'view_name'", ":", "get_detail_view_name", "(", "related_model", ")", "}", "if", "to_many", ":", "kwargs", "[", "'many'", "]", "=", "True", "if", "to_field", ":", "kwargs", "[", "'to_field'", "]", "=", "to_field", "if", "has_through_model", ":", "kwargs", "[", "'read_only'", "]", "=", "True", "kwargs", ".", "pop", "(", "'queryset'", ",", "None", ")", "if", "model_field", ":", "if", "model_field", ".", "verbose_name", "and", "needs_label", "(", "model_field", ",", "field_name", ")", ":", "kwargs", "[", "'label'", "]", "=", "capfirst", "(", "model_field", ".", "verbose_name", ")", "help_text", "=", "model_field", ".", "help_text", "if", "help_text", ":", "kwargs", "[", "'help_text'", "]", "=", "help_text", "if", "not", "model_field", ".", "editable", ":", "kwargs", "[", "'read_only'", "]", "=", "True", "kwargs", ".", "pop", "(", "'queryset'", ",", "None", ")", "if", "kwargs", ".", "get", "(", "'read_only'", ",", "False", ")", ":", "# If this field is read-only, then return early.", "# No further keyword arguments are valid.", "return", "kwargs", "if", "model_field", ".", "has_default", "(", ")", "or", "model_field", ".", "blank", "or", "model_field", ".", "null", ":", "kwargs", "[", "'required'", "]", "=", "False", "if", "model_field", ".", "null", ":", "kwargs", "[", "'allow_null'", "]", "=", "True", "if", "model_field", ".", "validators", ":", "kwargs", "[", "'validators'", "]", "=", "model_field", ".", "validators", "if", "getattr", "(", "model_field", ",", "'unique'", ",", "False", ")", ":", "validator", "=", "UniqueValidator", "(", "queryset", "=", "model_field", ".", "model", ".", "_default_manager", ")", "kwargs", "[", "'validators'", "]", "=", "kwargs", ".", "get", "(", "'validators'", ",", "[", "]", ")", "+", "[", "validator", "]", "if", "to_many", "and", "not", "model_field", ".", "blank", ":", "kwargs", "[", "'allow_empty'", "]", "=", "False", "return", "kwargs" ]
Creates a default instance of a flat relational field.
[ "Creates", "a", "default", "instance", "of", "a", "flat", "relational", "field", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/field_mapping.py#L237-L283
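Two small dict-handling habits carry most of get_relation_kwargs: a read-only field must not keep its queryset, and extra validators are appended without clobbering ones already collected. A tiny sketch of that bookkeeping with placeholder values, since the real queryset and validator objects need a Django project:

# Illustrative kwargs bookkeeping; the string values stand in for real objects.
kwargs = {'queryset': 'related_model._default_manager', 'view_name': 'user-detail'}

read_only = True
if read_only:
    kwargs['read_only'] = True
    kwargs.pop('queryset', None)              # a read-only field takes no queryset

unique_validator = 'UniqueValidator(...)'     # placeholder for the real validator
kwargs['validators'] = kwargs.get('validators', []) + [unique_validator]
print(kwargs)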
245,298
Diaoul/pyextdirect
pyextdirect/api.py
create_api_dict
def create_api_dict(bases, url, **kwargs): """Create an API dict :param bases: configuration bases :type bases: :class:`~pyextdirect.configuration.Base` or list of :class:`~pyextdirect.configuration.Base` :param string url: URL where the router can be reached :param \*\*kwargs: extra keyword arguments to populate the API dict. Most common keyword arguments are *id*, *maxRetries*, *namespace*, *priority* and *timeout* .. note:: Keyword arguments *type*, *url*, *actions* and *enableUrlEncode* will be overridden """ api = kwargs or {} api.update({'type': 'remoting', 'url': url, 'actions': defaultdict(list), 'enableUrlEncode': 'data'}) if not isinstance(bases, list): bases = [bases] configuration = merge_configurations([b.configuration for b in bases]) for action, methods in configuration.iteritems(): for method, element in methods.iteritems(): if isinstance(element, tuple): func = getattr(element[0], element[1]) attrs = len(inspect.getargspec(func)[0]) - 1 else: func = element attrs = len(inspect.getargspec(func)[0]) spec = {'name': method, 'len': attrs} if func.exposed_kind == SUBMIT: spec['formHandler'] = True api['actions'][action].append(spec) return api
python
def create_api_dict(bases, url, **kwargs): """Create an API dict :param bases: configuration bases :type bases: :class:`~pyextdirect.configuration.Base` or list of :class:`~pyextdirect.configuration.Base` :param string url: URL where the router can be reached :param \*\*kwargs: extra keyword arguments to populate the API dict. Most common keyword arguments are *id*, *maxRetries*, *namespace*, *priority* and *timeout* .. note:: Keyword arguments *type*, *url*, *actions* and *enableUrlEncode* will be overridden """ api = kwargs or {} api.update({'type': 'remoting', 'url': url, 'actions': defaultdict(list), 'enableUrlEncode': 'data'}) if not isinstance(bases, list): bases = [bases] configuration = merge_configurations([b.configuration for b in bases]) for action, methods in configuration.iteritems(): for method, element in methods.iteritems(): if isinstance(element, tuple): func = getattr(element[0], element[1]) attrs = len(inspect.getargspec(func)[0]) - 1 else: func = element attrs = len(inspect.getargspec(func)[0]) spec = {'name': method, 'len': attrs} if func.exposed_kind == SUBMIT: spec['formHandler'] = True api['actions'][action].append(spec) return api
[ "def", "create_api_dict", "(", "bases", ",", "url", ",", "*", "*", "kwargs", ")", ":", "api", "=", "kwargs", "or", "{", "}", "api", ".", "update", "(", "{", "'type'", ":", "'remoting'", ",", "'url'", ":", "url", ",", "'actions'", ":", "defaultdict", "(", "list", ")", ",", "'enableUrlEncode'", ":", "'data'", "}", ")", "if", "not", "isinstance", "(", "bases", ",", "list", ")", ":", "bases", "=", "[", "bases", "]", "configuration", "=", "merge_configurations", "(", "[", "b", ".", "configuration", "for", "b", "in", "bases", "]", ")", "for", "action", ",", "methods", "in", "configuration", ".", "iteritems", "(", ")", ":", "for", "method", ",", "element", "in", "methods", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "element", ",", "tuple", ")", ":", "func", "=", "getattr", "(", "element", "[", "0", "]", ",", "element", "[", "1", "]", ")", "attrs", "=", "len", "(", "inspect", ".", "getargspec", "(", "func", ")", "[", "0", "]", ")", "-", "1", "else", ":", "func", "=", "element", "attrs", "=", "len", "(", "inspect", ".", "getargspec", "(", "func", ")", "[", "0", "]", ")", "spec", "=", "{", "'name'", ":", "method", ",", "'len'", ":", "attrs", "}", "if", "func", ".", "exposed_kind", "==", "SUBMIT", ":", "spec", "[", "'formHandler'", "]", "=", "True", "api", "[", "'actions'", "]", "[", "action", "]", ".", "append", "(", "spec", ")", "return", "api" ]
Create an API dict :param bases: configuration bases :type bases: :class:`~pyextdirect.configuration.Base` or list of :class:`~pyextdirect.configuration.Base` :param string url: URL where the router can be reached :param \*\*kwargs: extra keyword arguments to populate the API dict. Most common keyword arguments are *id*, *maxRetries*, *namespace*, *priority* and *timeout* .. note:: Keyword arguments *type*, *url*, *actions* and *enableUrlEncode* will be overridden
[ "Create", "an", "API", "dict" ]
34ddfe882d467b3769644e8131fb90fe472eff80
https://github.com/Diaoul/pyextdirect/blob/34ddfe882d467b3769644e8131fb90fe472eff80/pyextdirect/api.py#L36-L65
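create_api_dict counts a method's arguments with inspect.getargspec, which was deprecated for years and removed in Python 3.11. A sketch of the same count with inspect.signature; subtracting one for self is only needed when, as in the record, the function is looked up on the class rather than on an instance:

import inspect

# Count the positional parameters of a callable, optionally dropping `self`.
def positional_arg_count(func, is_unbound_method=False):
    params = [p for p in inspect.signature(func).parameters.values()
              if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
    return len(params) - (1 if is_unbound_method else 0)

def example(a, b, c=1, *args, **kwargs):
    return a, b, c

print(positional_arg_count(example))   # 3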
245,299
maxfischer2781/chainlet
chainlet/primitives/bundle.py
bundle_sequences
def bundle_sequences(element): """ Convert sequence types to bundles This converter automatically constructs a :py:class:`~.Bundle` from any :py:class:`tuple`, :py:class:`list` or :py:class:`set` encountered during linking. The following two lines produce the same chain: .. code:: python a >> [b, c, d] >> e a >> Bundle((b, c, d)) >> e """ if isinstance(element, (tuple, list, set)): return Bundle(element) return NotImplemented
python
def bundle_sequences(element): """ Convert sequence types to bundles This converter automatically constructs a :py:class:`~.Bundle` from any :py:class:`tuple`, :py:class:`list` or :py:class:`set` encountered during linking. The following two lines produce the same chain: .. code:: python a >> [b, c, d] >> e a >> Bundle((b, c, d)) >> e """ if isinstance(element, (tuple, list, set)): return Bundle(element) return NotImplemented
[ "def", "bundle_sequences", "(", "element", ")", ":", "if", "isinstance", "(", "element", ",", "(", "tuple", ",", "list", ",", "set", ")", ")", ":", "return", "Bundle", "(", "element", ")", "return", "NotImplemented" ]
Convert sequence types to bundles This converter automatically constructs a :py:class:`~.Bundle` from any :py:class:`tuple`, :py:class:`list` or :py:class:`set` encountered during linking. The following two lines produce the same chain: .. code:: python a >> [b, c, d] >> e a >> Bundle((b, c, d)) >> e
[ "Convert", "sequence", "types", "to", "bundles" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/primitives/bundle.py#L41-L57
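bundle_sequences returns NotImplemented when the element is not a sequence, which lets a dispatcher fall through to the next converter. A sketch of that converter-chain pattern; the converters and the list standing in for Bundle are invented for illustration and are not the real chainlet registry:

# Each converter returns either a converted element or NotImplemented; the
# dispatcher tries them in order and keeps the first real result.
def convert_sequences(element):
    if isinstance(element, (tuple, list, set)):
        return list(element)          # stands in for Bundle(element)
    return NotImplemented

def convert_passthrough(element):
    return element

def convert(element, converters=(convert_sequences, convert_passthrough)):
    for converter in converters:
        result = converter(element)
        if result is not NotImplemented:
            return result
    raise TypeError("no converter accepted %r" % (element,))

print(convert((1, 2, 3)))   # [1, 2, 3]
print(convert("abc"))       # "abc"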