| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (stringclasses, 1 value) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
244,900
|
dossier/dossier.models
|
dossier/models/etl/interface.py
|
add_sip_to_fc
|
def add_sip_to_fc(fc, tfidf, limit=40):
'''add "bowNP_sip" to `fc` using `tfidf` data
'''
if 'bowNP' not in fc:
return
if tfidf is None:
return
sips = features.sip_noun_phrases(tfidf, fc['bowNP'].keys(), limit=limit)
fc[u'bowNP_sip'] = StringCounter(sips)
|
python
|
def add_sip_to_fc(fc, tfidf, limit=40):
'''add "bowNP_sip" to `fc` using `tfidf` data
'''
if 'bowNP' not in fc:
return
if tfidf is None:
return
sips = features.sip_noun_phrases(tfidf, fc['bowNP'].keys(), limit=limit)
fc[u'bowNP_sip'] = StringCounter(sips)
|
[
"def",
"add_sip_to_fc",
"(",
"fc",
",",
"tfidf",
",",
"limit",
"=",
"40",
")",
":",
"if",
"'bowNP'",
"not",
"in",
"fc",
":",
"return",
"if",
"tfidf",
"is",
"None",
":",
"return",
"sips",
"=",
"features",
".",
"sip_noun_phrases",
"(",
"tfidf",
",",
"fc",
"[",
"'bowNP'",
"]",
".",
"keys",
"(",
")",
",",
"limit",
"=",
"limit",
")",
"fc",
"[",
"u'bowNP_sip'",
"]",
"=",
"StringCounter",
"(",
"sips",
")"
] |
add "bowNP_sip" to `fc` using `tfidf` data
|
[
"add",
"bowNP_sip",
"to",
"fc",
"using",
"tfidf",
"data"
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/etl/interface.py#L234-L242
|
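A minimal sketch of how add_sip_to_fc behaves, with stand-ins for the dossier dependencies (StringCounter and features.sip_noun_phrases are stubbed here purely for illustration; the real implementations live in the dossier packages):

from collections import Counter as StringCounter  # stand-in for dossier's StringCounter

class features:  # stub for the dossier features helpers
    @staticmethod
    def sip_noun_phrases(tfidf, phrases, limit=40):
        # pretend every phrase qualifies as "statistically improbable"
        return {p: 1 for p in list(phrases)[:limit]}

def add_sip_to_fc(fc, tfidf, limit=40):
    if 'bowNP' not in fc:
        return  # no noun phrases to rank
    if tfidf is None:
        return  # no tf-idf model, no scores
    sips = features.sip_noun_phrases(tfidf, fc['bowNP'].keys(), limit=limit)
    fc[u'bowNP_sip'] = StringCounter(sips)

fc = {'bowNP': {'machine learning': 3, 'feature collection': 1}}
add_sip_to_fc(fc, tfidf=object())
print(fc[u'bowNP_sip'])  # Counter with the surviving noun phrases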
244,901
|
collectiveacuity/labPack
|
labpack/location/find.py
|
findClient.get_password
|
def get_password(self):
'''
a method to retrieve the password for the group mosquitto server
:return: string with group mosquitto server password
NOTE: result is added to self.password property
'''
import requests
url = '%s/mqtt' % self.endpoint
params = {
'group': self.group_name
}
response = requests.put(url, params=params)
response_details = response.json()
self.password = response_details['password']
return self.password
|
python
|
def get_password(self):
'''
a method to retrieve the password for the group mosquitto server
:return: string with group mosquitto server password
NOTE: result is added to self.password property
'''
import requests
url = '%s/mqtt' % self.endpoint
params = {
'group': self.group_name
}
response = requests.put(url, params=params)
response_details = response.json()
self.password = response_details['password']
return self.password
|
[
"def",
"get_password",
"(",
"self",
")",
":",
"import",
"requests",
"url",
"=",
"'%s/mqtt'",
"%",
"self",
".",
"endpoint",
"params",
"=",
"{",
"'group'",
":",
"self",
".",
"group_name",
"}",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"params",
"=",
"params",
")",
"response_details",
"=",
"response",
".",
"json",
"(",
")",
"self",
".",
"password",
"=",
"response_details",
"[",
"'password'",
"]",
"return",
"self",
".",
"password"
] |
a method to retrieve the password for the group mosquitto server
:return: string with group mosquitto server password
NOTE: result is added to self.password property
|
[
"a",
"method",
"to",
"retrieve",
"the",
"password",
"for",
"the",
"group",
"mosquitto",
"server"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/location/find.py#L83-L102
|
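A hypothetical usage sketch (the endpoint and group name below are invented, and the constructor arguments are assumptions; get_password issues a PUT to <endpoint>/mqtt under the hood):

client = findClient(endpoint='https://find.example.com', group_name='demo-group')  # hypothetical setup
password = client.get_password()
assert password == client.password  # the result is also cached on self.password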
244,902
|
collectiveacuity/labPack
|
labpack/location/find.py
|
findClient.get_position
|
def get_position(self, user_id, track=False, confidence=False):
'''
a method to retrieve the latest position of a user
:param user_id: string with id of user
:param track: [optional] boolean to add user to self.positions
:param confidence: [optional] boolean to include the data model confidence scores
:return: dictionary with position details
NOTE: if user does not exist, then location and time are null values
{
'time': 0.0,
'location': 'location.id',
'id': 'user_id',
bayes: {}, # if confidence = True
svm: None, # if confidence = True
rf: {} # if confidence = True
}
'''
title = '%s.get_position' % self.__class__.__name__
# validate inputs
input_fields = {
'user_id': user_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct empty response
position_details = {
'location': '',
'time': 0.0,
'id': user_id
}
# construct empty position history
position_history = []
# compose request
import requests
url = self.endpoint + '/location'
params = {
'group': self.group_name,
'user': user_id,
'n': 1
}
response = requests.get(url, params=params)
# ingest response
response_details = response.json()
from labpack.records.time import labDT
for key in response_details['users'].keys():
if key == user_id:
for entry in response_details['users'][key]:
if 'time' in entry.keys() and 'location' in entry.keys():
time_string = entry['time']
time_string = time_string.replace(' +0000 UTC', 'Z')
time_string = time_string.replace(' ', 'T')
time_dt = labDT.fromISO(time_string).epoch()
if confidence:
for key, value in entry.items():
position_details[key] = value
position_details['time'] = time_dt
position_details['location'] = entry['location']
break
if track:
stored_position = {
'location': position_details['location'],
'time': position_details['time']
}
self.positions[user_id] = stored_position
return position_details
|
python
|
def get_position(self, user_id, track=False, confidence=False):
'''
a method to retrieve the latest position of a user
:param user_id: string with id of user
:param track: [optional] boolean to add user to self.positions
:param confidence: [optional] boolean to include the data model confidence scores
:return: dictionary with position details
NOTE: if user does not exist, then location and time are null values
{
'time': 0.0,
'location': 'location.id',
'id': 'user_id',
bayes: {}, # if confidence = True
svm: None, # if confidence = True
rf: {} # if confidence = True
}
'''
title = '%s.get_position' % self.__class__.__name__
# validate inputs
input_fields = {
'user_id': user_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct empty response
position_details = {
'location': '',
'time': 0.0,
'id': user_id
}
# construct empty position history
position_history = []
# compose request
import requests
url = self.endpoint + '/location'
params = {
'group': self.group_name,
'user': user_id,
'n': 1
}
response = requests.get(url, params=params)
# ingest response
response_details = response.json()
from labpack.records.time import labDT
for key in response_details['users'].keys():
if key == user_id:
for entry in response_details['users'][key]:
if 'time' in entry.keys() and 'location' in entry.keys():
time_string = entry['time']
time_string = time_string.replace(' +0000 UTC', 'Z')
time_string = time_string.replace(' ', 'T')
time_dt = labDT.fromISO(time_string).epoch()
if confidence:
for key, value in entry.items():
position_details[key] = value
position_details['time'] = time_dt
position_details['location'] = entry['location']
break
if track:
stored_position = {
'location': position_details['location'],
'time': position_details['time']
}
self.positions[user_id] = stored_position
return position_details
|
[
"def",
"get_position",
"(",
"self",
",",
"user_id",
",",
"track",
"=",
"False",
",",
"confidence",
"=",
"False",
")",
":",
"title",
"=",
"'%s.get_position'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'user_id'",
":",
"user_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# construct empty response",
"position_details",
"=",
"{",
"'location'",
":",
"''",
",",
"'time'",
":",
"0.0",
",",
"'id'",
":",
"user_id",
"}",
"# construct empty position history",
"position_history",
"=",
"[",
"]",
"# compose request",
"import",
"requests",
"url",
"=",
"self",
".",
"endpoint",
"+",
"'/location'",
"params",
"=",
"{",
"'group'",
":",
"self",
".",
"group_name",
",",
"'user'",
":",
"user_id",
",",
"'n'",
":",
"1",
"}",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"# ingest response",
"response_details",
"=",
"response",
".",
"json",
"(",
")",
"from",
"labpack",
".",
"records",
".",
"time",
"import",
"labDT",
"for",
"key",
"in",
"response_details",
"[",
"'users'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"key",
"==",
"user_id",
":",
"for",
"entry",
"in",
"response_details",
"[",
"'users'",
"]",
"[",
"key",
"]",
":",
"if",
"'time'",
"in",
"entry",
".",
"keys",
"(",
")",
"and",
"'location'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"time_string",
"=",
"entry",
"[",
"'time'",
"]",
"time_string",
"=",
"time_string",
".",
"replace",
"(",
"' +0000 UTC'",
",",
"'Z'",
")",
"time_string",
"=",
"time_string",
".",
"replace",
"(",
"' '",
",",
"'T'",
")",
"time_dt",
"=",
"labDT",
".",
"fromISO",
"(",
"time_string",
")",
".",
"epoch",
"(",
")",
"if",
"confidence",
":",
"for",
"key",
",",
"value",
"in",
"entry",
".",
"items",
"(",
")",
":",
"position_details",
"[",
"key",
"]",
"=",
"value",
"position_details",
"[",
"'time'",
"]",
"=",
"time_dt",
"position_details",
"[",
"'location'",
"]",
"=",
"entry",
"[",
"'location'",
"]",
"break",
"if",
"track",
":",
"stored_position",
"=",
"{",
"'location'",
":",
"position_details",
"[",
"'location'",
"]",
",",
"'time'",
":",
"position_details",
"[",
"'time'",
"]",
"}",
"self",
".",
"positions",
"[",
"user_id",
"]",
"=",
"stored_position",
"return",
"position_details"
] |
a method to retrieve the latest position of a user
:param user_id: string with id of user
:param track: [optional] boolean to add user to self.positions
:param confidence: [optional] boolean to include the data model confidence scores
:return: dictionary with position details
NOTE: if user does not exist, then location and time are null values
{
'time': 0.0,
'location': 'location.id',
'id': 'user_id',
bayes: {}, # if confidence = True
svm: None, # if confidence = True
rf: {} # if confidence = True
}
|
[
"a",
"method",
"to",
"retrieve",
"the",
"latest",
"position",
"of",
"a",
"user"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/location/find.py#L150-L227
|
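The timestamp normalization buried in the middle of get_position is the non-obvious step: the find server reports Go-style timestamps, which get rewritten into ISO 8601 before parsing. A self-contained sketch of just that step (the sample value is hypothetical):

time_string = '2018-06-01 14:30:02.4321 +0000 UTC'   # hypothetical Go-style timestamp
time_string = time_string.replace(' +0000 UTC', 'Z')  # drop the Go zone suffix
time_string = time_string.replace(' ', 'T')           # ISO 8601 date/time separator
print(time_string)  # 2018-06-01T14:30:02.4321Z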
244,903
|
OlivierB/Bashutils
|
bashutils/logmsg.py
|
log_begin_message
|
def log_begin_message(message):
"""
Create and print a new log message
waiting for an end log message
"""
global MESSAGE
MESSAGE = message
sys.stdout.write("[....] ")
sys.stdout.write(message)
sys.stdout.flush()
|
python
|
def log_begin_message(message):
"""
Create and print a new log message
waiting for an end log message
"""
global MESSAGE
MESSAGE = message
sys.stdout.write("[....] ")
sys.stdout.write(message)
sys.stdout.flush()
|
[
"def",
"log_begin_message",
"(",
"message",
")",
":",
"global",
"MESSAGE",
"MESSAGE",
"=",
"message",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"[....] \"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"message",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
Create and print a new log message
waiting for an end log message
|
[
"Create",
"and",
"print",
"a",
"new",
"log",
"message",
"waiting",
"for",
"an",
"end",
"log",
"message"
] |
487762049f5d09f14f8a6c764bc0a823f332d8a1
|
https://github.com/OlivierB/Bashutils/blob/487762049f5d09f14f8a6c764bc0a823f332d8a1/bashutils/logmsg.py#L80-L89
|
244,904
|
OlivierB/Bashutils
|
bashutils/logmsg.py
|
log_end_message
|
def log_end_message(log):
"""
End a log message with a status
defined by log
"""
if log not in MESSAGE_LOG:
log = -1
res = colors.color_text(*MESSAGE_LOG[log][1])
sys.stdout.write("\r[" + res + "] " + MESSAGE + "\n")
|
python
|
def log_end_message(log):
"""
End a log message with a status
defined by log
"""
if log not in MESSAGE_LOG:
log = -1
res = colors.color_text(*MESSAGE_LOG[log][1])
sys.stdout.write("\r[" + res + "] " + MESSAGE + "\n")
|
[
"def",
"log_end_message",
"(",
"log",
")",
":",
"if",
"not",
"log",
"in",
"MESSAGE_LOG",
".",
"keys",
"(",
")",
":",
"log",
"=",
"-",
"1",
"res",
"=",
"colors",
".",
"color_text",
"(",
"*",
"MESSAGE_LOG",
"[",
"log",
"]",
"[",
"1",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r[\"",
"+",
"res",
"+",
"\"] \"",
"+",
"MESSAGE",
"+",
"\"\\n\"",
")"
] |
End a log message with a status
defined by log
|
[
"End",
"a",
"log",
"message",
"with",
"a",
"status",
"defined",
"by",
"log"
] |
487762049f5d09f14f8a6c764bc0a823f332d8a1
|
https://github.com/OlivierB/Bashutils/blob/487762049f5d09f14f8a6c764bc0a823f332d8a1/bashutils/logmsg.py#L92-L102
|
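log_begin_message and log_end_message are designed to be used as a pair: the first prints a pending '[....]' status with no newline, and the second rewinds to column 0 with '\r' and overwrites the status box. A dependency-free sketch of the same terminal trick (the status strings are simplified stand-ins for MESSAGE_LOG):

import sys
import time

def begin(message):
    sys.stdout.write('[....] ' + message)
    sys.stdout.flush()  # no newline: the line stays open for rewriting

def end(message, ok=True):
    status = ' OK ' if ok else 'FAIL'
    # '\r' returns to column 0, so the '[....]' placeholder is overwritten in place
    sys.stdout.write('\r[' + status + '] ' + message + '\n')

begin('mounting /dev/sda1')
time.sleep(0.5)  # stand-in for real work
end('mounting /dev/sda1')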
244,905
|
deifyed/vault
|
libconman/vault.py
|
Vault._fetchFilesFromFolder
|
def _fetchFilesFromFolder(self, target, recursive):
'''
Fetches files from the target directory and, if recursive
mode is on, all subdirectories.
Returns a list of all found files
'''
directory_items = os.walk(target)
# If recursive is false, fetch only the first tuple
if not recursive:
directory_items = [next(directory_items)]
targets = []
for dir_name, folders, files in directory_items:
for f in files:
targets.append(os.path.join(dir_name, f))
return targets
|
python
|
def _fetchFilesFromFolder(self, target, recursive):
'''
Fetches files from the target directory and, if recursive
mode is on, all subdirectories.
Returns a list of all found files
'''
directory_items = os.walk(target)
# If recursive is false, fetch only the first tuple
if not recursive:
directory_items = [next(directory_items)]
targets = []
for dir_name, folders, files in directory_items:
for f in files:
targets.append(os.path.join(dir_name, f))
return targets
|
[
"def",
"_fetchFilesFromFolder",
"(",
"self",
",",
"target",
",",
"recursive",
")",
":",
"directory_items",
"=",
"os",
".",
"walk",
"(",
"target",
")",
"# If recursive is false, fetch only the first tuple",
"if",
"not",
"recursive",
":",
"directory_items",
"=",
"[",
"next",
"(",
"directory_items",
")",
"]",
"targets",
"=",
"[",
"]",
"for",
"dir_name",
",",
"folders",
",",
"files",
"in",
"directory_items",
":",
"for",
"f",
"in",
"files",
":",
"targets",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"f",
")",
")",
"return",
"targets"
] |
Fetches files from the target directory and, if recursive
mode is on, all subdirectories.
Returns a list of all found files
|
[
"Fetches",
"files",
"from",
"the",
"target",
"directory",
"and",
"-",
"if",
"recursive",
"mode",
"is",
"on",
"all",
"subdirectories",
"."
] |
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
|
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/vault.py#L26-L44
|
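The non-recursive branch works because os.walk returns a generator: calling next() on it yields only the (dirpath, dirnames, filenames) tuple for the top-level directory. A standalone sketch:

import os

target = '.'

# next() pulls just the first tuple, i.e. a non-recursive listing;
# iterating the generator itself would descend into subdirectories.
top_level_only = [next(os.walk(target))]

files = [os.path.join(dir_name, f)
         for dir_name, _, filenames in top_level_only
         for f in filenames]
print(files)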
244,906
|
deifyed/vault
|
libconman/vault.py
|
Vault.remove
|
def remove(self, iid):
'''
Deletes file from vault and removes database information
'''
for index in iid:
target = Target.getTarget(index)
target.delete()
|
python
|
def remove(self, iid):
'''
Deletes file from vault and removes database information
'''
for index in iid:
target = Target.getTarget(index)
target.delete()
|
[
"def",
"remove",
"(",
"self",
",",
"iid",
")",
":",
"for",
"index",
"in",
"iid",
":",
"target",
"=",
"Target",
".",
"getTarget",
"(",
"index",
")",
"target",
".",
"delete",
"(",
")"
] |
Deletes file from vault and removes database information
|
[
"Deletes",
"file",
"from",
"vault",
"and",
"removes",
"database",
"information"
] |
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
|
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/vault.py#L60-L67
|
244,907
|
deifyed/vault
|
libconman/vault.py
|
Vault.deploy
|
def deploy(self, iid):
'''
Links an item from the vault to the original path
'''
for index in iid:
target = Target.getTarget(index)
if target:
verbose('Deploying id {} from {} to {} with the name {}'
.format(index, target.vault_path, target.path, target.name))
target.deploy()
verbose('Deploy complete')
|
python
|
def deploy(self, iid):
'''
Links an item from the vault to the original path
'''
for index in iid:
target = Target.getTarget(index)
if target:
verbose('Deploying id {} from {} to {} with the name {}'
.format(index, target.vault_path, target.path, target.name))
target.deploy()
verbose('Deploy complete')
|
[
"def",
"deploy",
"(",
"self",
",",
"iid",
")",
":",
"for",
"index",
"in",
"iid",
":",
"target",
"=",
"Target",
".",
"getTarget",
"(",
"index",
")",
"if",
"target",
":",
"verbose",
"(",
"'Deploying id {} from {} to {} with the name {}'",
".",
"format",
"(",
"index",
",",
"target",
".",
"vault_path",
",",
"target",
".",
"path",
",",
"target",
".",
"name",
")",
")",
"target",
".",
"deploy",
"(",
")",
"verbose",
"(",
"'Deploy complete'",
")"
] |
Links an item from the vault to the original path
|
[
"Links",
"an",
"item",
"from",
"the",
"vault",
"to",
"the",
"original",
"path"
] |
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
|
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/vault.py#L69-L81
|
244,908
|
deifyed/vault
|
libconman/vault.py
|
Vault.deployAll
|
def deployAll(self):
'''
Deploys all the items from the vault. Useful after a format
'''
targets = [Target.getTarget(iid) for iid, n, p in self.db.listTargets()]
for target in targets:
target.deploy()
verbose('Deploy all complete')
|
python
|
def deployAll(self):
'''
Deploys all the items from the vault. Useful after a format
'''
targets = [Target.getTarget(iid) for iid, n, p in self.db.listTargets()]
for target in targets:
target.deploy()
verbose('Deploy all complete')
|
[
"def",
"deployAll",
"(",
"self",
")",
":",
"targets",
"=",
"[",
"Target",
".",
"getTarget",
"(",
"iid",
")",
"for",
"iid",
",",
"n",
",",
"p",
"in",
"self",
".",
"db",
".",
"listTargets",
"(",
")",
"]",
"for",
"target",
"in",
"targets",
":",
"target",
".",
"deploy",
"(",
")",
"verbose",
"(",
"'Deploy all complete'",
")"
] |
Deploys all the items from the vault. Useful after a format
|
[
"Deploys",
"all",
"the",
"items",
"from",
"the",
"vault",
".",
"Useful",
"after",
"a",
"format"
] |
e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97
|
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/vault.py#L83-L92
|
244,909
|
sassoo/goldman
|
goldman/resources/model.py
|
on_delete
|
def on_delete(resc, req, resp, rid): # pylint: disable=unused-argument
""" Delete the single item
Upon a successful deletion, an empty-bodied 204
is returned.
"""
signals.pre_req.send(resc.model)
signals.pre_req_delete.send(resc.model)
model = find(resc.model, rid)
goldman.sess.store.delete(model)
resp.status = falcon.HTTP_204
signals.post_req.send(resc.model)
signals.post_req_delete.send(resc.model)
|
python
|
def on_delete(resc, req, resp, rid): # pylint: disable=unused-argument
""" Delete the single item
Upon a successful deletion, an empty-bodied 204
is returned.
"""
signals.pre_req.send(resc.model)
signals.pre_req_delete.send(resc.model)
model = find(resc.model, rid)
goldman.sess.store.delete(model)
resp.status = falcon.HTTP_204
signals.post_req.send(resc.model)
signals.post_req_delete.send(resc.model)
|
[
"def",
"on_delete",
"(",
"resc",
",",
"req",
",",
"resp",
",",
"rid",
")",
":",
"# pylint: disable=unused-argument",
"signals",
".",
"pre_req",
".",
"send",
"(",
"resc",
".",
"model",
")",
"signals",
".",
"pre_req_delete",
".",
"send",
"(",
"resc",
".",
"model",
")",
"model",
"=",
"find",
"(",
"resc",
".",
"model",
",",
"rid",
")",
"goldman",
".",
"sess",
".",
"store",
".",
"delete",
"(",
"model",
")",
"resp",
".",
"status",
"=",
"falcon",
".",
"HTTP_204",
"signals",
".",
"post_req",
".",
"send",
"(",
"resc",
".",
"model",
")",
"signals",
".",
"post_req_delete",
".",
"send",
"(",
"resc",
".",
"model",
")"
] |
Delete the single item
Upon a successful deletion, an empty-bodied 204
is returned.
|
[
"Delete",
"the",
"single",
"item"
] |
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/model.py#L23-L39
|
244,910
|
sassoo/goldman
|
goldman/resources/model.py
|
on_get
|
def on_get(resc, req, resp, rid):
""" Find the model by id & serialize it back """
signals.pre_req.send(resc.model)
signals.pre_req_find.send(resc.model)
model = find(resc.model, rid)
props = to_rest_model(model, includes=req.includes)
resp.last_modified = model.updated
resp.serialize(props)
signals.post_req.send(resc.model)
signals.post_req_find.send(resc.model)
|
python
|
def on_get(resc, req, resp, rid):
""" Find the model by id & serialize it back """
signals.pre_req.send(resc.model)
signals.pre_req_find.send(resc.model)
model = find(resc.model, rid)
props = to_rest_model(model, includes=req.includes)
resp.last_modified = model.updated
resp.serialize(props)
signals.post_req.send(resc.model)
signals.post_req_find.send(resc.model)
|
[
"def",
"on_get",
"(",
"resc",
",",
"req",
",",
"resp",
",",
"rid",
")",
":",
"signals",
".",
"pre_req",
".",
"send",
"(",
"resc",
".",
"model",
")",
"signals",
".",
"pre_req_find",
".",
"send",
"(",
"resc",
".",
"model",
")",
"model",
"=",
"find",
"(",
"resc",
".",
"model",
",",
"rid",
")",
"props",
"=",
"to_rest_model",
"(",
"model",
",",
"includes",
"=",
"req",
".",
"includes",
")",
"resp",
".",
"last_modified",
"=",
"model",
".",
"updated",
"resp",
".",
"serialize",
"(",
"props",
")",
"signals",
".",
"post_req",
".",
"send",
"(",
"resc",
".",
"model",
")",
"signals",
".",
"post_req_find",
".",
"send",
"(",
"resc",
".",
"model",
")"
] |
Find the model by id & serialize it back
|
[
"Find",
"the",
"model",
"by",
"id",
"&",
"serialize",
"it",
"back"
] |
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/model.py#L42-L55
|
244,911
|
sassoo/goldman
|
goldman/resources/model.py
|
on_patch
|
def on_patch(resc, req, resp, rid):
""" Deserialize the payload & update the single item """
signals.pre_req.send(resc.model)
signals.pre_req_update.send(resc.model)
props = req.deserialize()
model = find(resc.model, rid)
from_rest(model, props)
goldman.sess.store.update(model)
props = to_rest_model(model, includes=req.includes)
resp.last_modified = model.updated
resp.serialize(props)
signals.post_req.send(resc.model)
signals.post_req_update.send(resc.model)
|
python
|
def on_patch(resc, req, resp, rid):
""" Deserialize the payload & update the single item """
signals.pre_req.send(resc.model)
signals.pre_req_update.send(resc.model)
props = req.deserialize()
model = find(resc.model, rid)
from_rest(model, props)
goldman.sess.store.update(model)
props = to_rest_model(model, includes=req.includes)
resp.last_modified = model.updated
resp.serialize(props)
signals.post_req.send(resc.model)
signals.post_req_update.send(resc.model)
|
[
"def",
"on_patch",
"(",
"resc",
",",
"req",
",",
"resp",
",",
"rid",
")",
":",
"signals",
".",
"pre_req",
".",
"send",
"(",
"resc",
".",
"model",
")",
"signals",
".",
"pre_req_update",
".",
"send",
"(",
"resc",
".",
"model",
")",
"props",
"=",
"req",
".",
"deserialize",
"(",
")",
"model",
"=",
"find",
"(",
"resc",
".",
"model",
",",
"rid",
")",
"from_rest",
"(",
"model",
",",
"props",
")",
"goldman",
".",
"sess",
".",
"store",
".",
"update",
"(",
"model",
")",
"props",
"=",
"to_rest_model",
"(",
"model",
",",
"includes",
"=",
"req",
".",
"includes",
")",
"resp",
".",
"last_modified",
"=",
"model",
".",
"updated",
"resp",
".",
"serialize",
"(",
"props",
")",
"signals",
".",
"post_req",
".",
"send",
"(",
"resc",
".",
"model",
")",
"signals",
".",
"post_req_update",
".",
"send",
"(",
"resc",
".",
"model",
")"
] |
Deserialize the payload & update the single item
|
[
"Deserialize",
"the",
"payload",
"&",
"update",
"the",
"single",
"item"
] |
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/model.py#L58-L75
|
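The three responders above share one bracketing pattern: a generic pre_req/post_req signal pair wrapped around a verb-specific pair (delete, find, update). The signals module looks blinker-style; a minimal sketch of hooking into such signals, assuming blinker semantics rather than goldman's actual internals:

from blinker import signal  # assumption: goldman's signals behave like blinker's

pre_req = signal('pre_req')
pre_req_delete = signal('pre_req_delete')

@pre_req.connect
def audit_any_request(model):
    print('request starting for', model)

@pre_req_delete.connect
def audit_delete(model):
    print('delete starting for', model)

# the order a responder fires them in: generic hook first, then the verb-specific one
pre_req.send('SomeModel')
pre_req_delete.send('SomeModel')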
244,912
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/decoders/parser_xml.py
|
decode
|
def decode(data):
"""
Handles decoding of the XML `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
"""
dom = None
try:
dom = dhtmlparser.parseString(data)
except Exception as e:
raise MetaParsingException("Can't parse your XML data: %s" % e)
root = dom.find("root")
# check whether there are <root>s
if not root:
raise MetaParsingException("All elements have to be inside <root>.")
# and make sure that there are not too many <root>s
if len(root) > 1:
raise MetaParsingException("Too many <root> elements in your XML!")
items = root[0].find("item")
# check for items
if not items:
raise MetaParsingException("There are no <items> in your XML <root>!")
decoded = []
for item in items:
if "key" not in item.params:
raise MetaParsingException(
"There is no 'key' parameter in %s." % str(item)
)
decoded.append([
item.params["key"],
item.getContent().strip()
])
decoded = validator.check_structure(decoded)
return decoded
|
python
|
def decode(data):
"""
Handles decoding of the XML `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
"""
dom = None
try:
dom = dhtmlparser.parseString(data)
except Exception as e:
raise MetaParsingException("Can't parse your XML data: %s" % e)
root = dom.find("root")
# check whether there are <root>s
if not root:
raise MetaParsingException("All elements have to be inside <root>.")
# and make sure that there are not too many <root>s
if len(root) > 1:
raise MetaParsingException("Too many <root> elements in your XML!")
items = root[0].find("item")
# check for items
if not items:
raise MetaParsingException("There are no <items> in your XML <root>!")
decoded = []
for item in items:
if "key" not in item.params:
raise MetaParsingException(
"There is no 'key' parameter in %s." % str(item)
)
decoded.append([
item.params["key"],
item.getContent().strip()
])
decoded = validator.check_structure(decoded)
return decoded
|
[
"def",
"decode",
"(",
"data",
")",
":",
"dom",
"=",
"None",
"try",
":",
"dom",
"=",
"dhtmlparser",
".",
"parseString",
"(",
"data",
")",
"except",
"Exception",
",",
"e",
":",
"raise",
"MetaParsingException",
"(",
"\"Can't parse your XML data: %s\"",
"%",
"e",
".",
"message",
")",
"root",
"=",
"dom",
".",
"find",
"(",
"\"root\"",
")",
"# check whether there is <root>s",
"if",
"not",
"root",
":",
"raise",
"MetaParsingException",
"(",
"\"All elements have to be inside <root>.\"",
")",
"# and make sure, that there is not too many <root>s",
"if",
"len",
"(",
"root",
")",
">",
"1",
":",
"raise",
"MetaParsingException",
"(",
"\"Too many <root> elements in your XML!\"",
")",
"items",
"=",
"root",
"[",
"0",
"]",
".",
"find",
"(",
"\"item\"",
")",
"# check for items",
"if",
"not",
"items",
":",
"raise",
"MetaParsingException",
"(",
"\"There are no <items> in your XML <root>!\"",
")",
"decoded",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"\"key\"",
"not",
"in",
"item",
".",
"params",
":",
"raise",
"MetaParsingException",
"(",
"\"There is no 'key' parameter in %s.\"",
"%",
"str",
"(",
"item",
")",
")",
"decoded",
".",
"append",
"(",
"[",
"item",
".",
"params",
"[",
"\"key\"",
"]",
",",
"item",
".",
"getContent",
"(",
")",
".",
"strip",
"(",
")",
"]",
")",
"decoded",
"=",
"validator",
".",
"check_structure",
"(",
"decoded",
")",
"return",
"decoded"
] |
Handles decoding of the XML `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
|
[
"Handles",
"decoding",
"of",
"the",
"XML",
"data",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/decoders/parser_xml.py#L41-L87
|
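decode expects exactly one <root> element wrapping <item> elements that each carry a key attribute. A sketch of the shape it accepts and the key/value pairs it extracts, using the standard library instead of dhtmlparser (the metadata keys are hypothetical):

import xml.etree.ElementTree as ET

data = '''
<root>
  <item key="title">Example Book</item>
  <item key="isbn">80-0000-000-0</item>
</root>
'''

root = ET.fromstring(data)
decoded = [[item.attrib['key'], (item.text or '').strip()]
           for item in root.findall('item')]
print(decoded)  # [['title', 'Example Book'], ['isbn', '80-0000-000-0']]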
244,913
|
SergeySatskiy/cdm-gc-plugin
|
cdmplugins/gc/__init__.py
|
GCPlugin.activate
|
def activate(self, ideSettings, ideGlobalData):
"""Activates the plugin.
The plugin may override the method to do specific
plugin activation handling.
ideSettings - reference to the IDE Settings singleton
see codimension/src/utils/settings.py
ideGlobalData - reference to the IDE global settings
see codimension/src/utils/globals.py
Note: if overridden, do not forget to call the
base class activate()
"""
WizardInterface.activate(self, ideSettings, ideGlobalData)
self.__where = self.__getConfiguredWhere()
self.ide.editorsManager.sigTabClosed.connect(self.__collectGarbage)
self.ide.project.sigProjectChanged.connect(self.__collectGarbage)
|
python
|
def activate(self, ideSettings, ideGlobalData):
"""Activates the plugin.
The plugin may override the method to do specific
plugin activation handling.
ideSettings - reference to the IDE Settings singleton
see codimension/src/utils/settings.py
ideGlobalData - reference to the IDE global settings
see codimension/src/utils/globals.py
Note: if overridden, do not forget to call the
base class activate()
"""
WizardInterface.activate(self, ideSettings, ideGlobalData)
self.__where = self.__getConfiguredWhere()
self.ide.editorsManager.sigTabClosed.connect(self.__collectGarbage)
self.ide.project.sigProjectChanged.connect(self.__collectGarbage)
|
[
"def",
"activate",
"(",
"self",
",",
"ideSettings",
",",
"ideGlobalData",
")",
":",
"WizardInterface",
".",
"activate",
"(",
"self",
",",
"ideSettings",
",",
"ideGlobalData",
")",
"self",
".",
"__where",
"=",
"self",
".",
"__getConfiguredWhere",
"(",
")",
"self",
".",
"ide",
".",
"editorsManager",
".",
"sigTabClosed",
".",
"connect",
"(",
"self",
".",
"__collectGarbage",
")",
"self",
".",
"ide",
".",
"project",
".",
"sigProjectChanged",
".",
"connect",
"(",
"self",
".",
"__collectGarbage",
")"
] |
Activates the plugin.
The plugin may override the method to do specific
plugin activation handling.
ideSettings - reference to the IDE Settings singleton
see codimension/src/utils/settings.py
ideGlobalData - reference to the IDE global settings
see codimension/src/utils/globals.py
Note: if overridden, do not forget to call the
base class activate()
|
[
"Activates",
"the",
"plugin",
"."
] |
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
|
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L51-L70
|
244,914
|
SergeySatskiy/cdm-gc-plugin
|
cdmplugins/gc/__init__.py
|
GCPlugin.deactivate
|
def deactivate(self):
"""Deactivates the plugin.
The plugin may override the method to do specific
plugin deactivation handling.
Note: if overridden, do not forget to call the
base class deactivate()
"""
self.ide.project.sigProjectChanged.disconnect(self.__collectGarbage)
self.ide.editorsManager.sigTabClosed.disconnect(self.__collectGarbage)
WizardInterface.deactivate(self)
|
python
|
def deactivate(self):
"""Deactivates the plugin.
The plugin may override the method to do specific
plugin deactivation handling.
Note: if overridden, do not forget to call the
base class deactivate()
"""
self.ide.project.sigProjectChanged.disconnect(self.__collectGarbage)
self.ide.editorsManager.sigTabClosed.disconnect(self.__collectGarbage)
WizardInterface.deactivate(self)
|
[
"def",
"deactivate",
"(",
"self",
")",
":",
"self",
".",
"ide",
".",
"project",
".",
"sigProjectChanged",
".",
"disconnect",
"(",
"self",
".",
"__collectGarbage",
")",
"self",
".",
"ide",
".",
"editorsManager",
".",
"sigTabClosed",
".",
"disconnect",
"(",
"self",
".",
"__collectGarbage",
")",
"WizardInterface",
".",
"deactivate",
"(",
"self",
")"
] |
Deactivates the plugin.
The plugin may override the method to do specific
plugin deactivation handling.
Note: if overridden, do not forget to call the
base class deactivate()
|
[
"Deactivates",
"the",
"plugin",
"."
] |
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
|
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L72-L83
|
244,915
|
SergeySatskiy/cdm-gc-plugin
|
cdmplugins/gc/__init__.py
|
GCPlugin.populateMainMenu
|
def populateMainMenu(self, parentMenu):
"""Populates the main menu.
The main menu looks as follows:
Plugins
- Plugin manager (fixed item)
- Separator (fixed item)
- <Plugin #1 name> (this is the parentMenu passed)
...
If no items were populated by the plugin then there will be no
<Plugin #N name> menu item shown.
It is suggested to insert plugin configuration item here if so.
"""
parentMenu.addAction("Configure", self.configure)
parentMenu.addAction("Collect garbage", self.__collectGarbage)
|
python
|
def populateMainMenu(self, parentMenu):
"""Populates the main menu.
The main menu looks as follows:
Plugins
- Plugin manager (fixed item)
- Separator (fixed item)
- <Plugin #1 name> (this is the parentMenu passed)
...
If no items were populated by the plugin then there will be no
<Plugin #N name> menu item shown.
It is suggested to insert plugin configuration item here if so.
"""
parentMenu.addAction("Configure", self.configure)
parentMenu.addAction("Collect garbage", self.__collectGarbage)
|
[
"def",
"populateMainMenu",
"(",
"self",
",",
"parentMenu",
")",
":",
"parentMenu",
".",
"addAction",
"(",
"\"Configure\"",
",",
"self",
".",
"configure",
")",
"parentMenu",
".",
"addAction",
"(",
"\"Collect garbage\"",
",",
"self",
".",
"__collectGarbage",
")"
] |
Populates the main menu.
The main menu looks as follows:
Plugins
- Plugin manager (fixed item)
- Separator (fixed item)
- <Plugin #1 name> (this is the parentMenu passed)
...
If no items were populated by the plugin then there will be no
<Plugin #N name> menu item shown.
It is suggested to insert plugin configuration item here if so.
|
[
"Populates",
"the",
"main",
"menu",
"."
] |
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
|
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L96-L110
|
244,916
|
SergeySatskiy/cdm-gc-plugin
|
cdmplugins/gc/__init__.py
|
GCPlugin.populateBufferContextMenu
|
def populateBufferContextMenu(self, parentMenu):
"""Populates the editing buffer context menu.
The buffer context menu shown for the current edited/viewed file
will have an item with a plugin name and subitems which are
populated here. If no items were populated then the plugin menu
item will not be shown.
Note: when a buffer context menu is selected by the user it always
refers to the current widget. To get access to the current
editing widget the plugin can use: self.ide.currentEditorWidget
The widget could be of different types and some circumstances
should be considered, e.g.:
- it could be a new file which has not been saved yet
- it could be modified
- it could be that the disk file has already been deleted
- etc.
Having the current widget reference the plugin is able to
retrieve the information it needs.
"""
parentMenu.addAction("Configure", self.configure)
parentMenu.addAction("Collect garbage", self.__collectGarbage)
|
python
|
def populateBufferContextMenu(self, parentMenu):
"""Populates the editing buffer context menu.
The buffer context menu shown for the current edited/viewed file
will have an item with a plugin name and subitems which are
populated here. If no items were populated then the plugin menu
item will not be shown.
Note: when a buffer context menu is selected by the user it always
refers to the current widget. To get access to the current
editing widget the plugin can use: self.ide.currentEditorWidget
The widget could be of different types and some circumstances
should be considered, e.g.:
- it could be a new file which has not been saved yet
- it could be modified
- it could be that the disk file has already been deleted
- etc.
Having the current widget reference the plugin is able to
retrieve the information it needs.
"""
parentMenu.addAction("Configure", self.configure)
parentMenu.addAction("Collect garbage", self.__collectGarbage)
|
[
"def",
"populateBufferContextMenu",
"(",
"self",
",",
"parentMenu",
")",
":",
"parentMenu",
".",
"addAction",
"(",
"\"Configure\"",
",",
"self",
".",
"configure",
")",
"parentMenu",
".",
"addAction",
"(",
"\"Collect garbage\"",
",",
"self",
".",
"__collectGarbage",
")"
] |
Populates the editing buffer context menu.
The buffer context menu shown for the current edited/viewed file
will have an item with a plugin name and subitems which are
populated here. If no items were populated then the plugin menu
item will not be shown.
Note: when a buffer context menu is selected by the user it always
refers to the current widget. To get access to the current
editing widget the plugin can use: self.ide.currentEditorWidget
The widget could be of different types and some circumstances
should be considered, e.g.:
- it could be a new file which has not been saved yet
- it could be modified
- it could be that the disk file has already been deleted
- etc.
Having the current widget reference the plugin is able to
retrieve the information it needs.
|
[
"Populates",
"the",
"editing",
"buffer",
"context",
"menu",
"."
] |
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
|
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L140-L161
|
244,917
|
SergeySatskiy/cdm-gc-plugin
|
cdmplugins/gc/__init__.py
|
GCPlugin.configure
|
def configure(self):
"""Configures the garbage collector plugin"""
dlg = GCPluginConfigDialog(self.__where)
if dlg.exec_() == QDialog.Accepted:
newWhere = dlg.getCheckedOption()
if newWhere != self.__where:
self.__where = newWhere
self.__saveConfiguredWhere()
|
python
|
def configure(self):
"""Configures the garbage collector plugin"""
dlg = GCPluginConfigDialog(self.__where)
if dlg.exec_() == QDialog.Accepted:
newWhere = dlg.getCheckedOption()
if newWhere != self.__where:
self.__where = newWhere
self.__saveConfiguredWhere()
|
[
"def",
"configure",
"(",
"self",
")",
":",
"dlg",
"=",
"GCPluginConfigDialog",
"(",
"self",
".",
"__where",
")",
"if",
"dlg",
".",
"exec_",
"(",
")",
"==",
"QDialog",
".",
"Accepted",
":",
"newWhere",
"=",
"dlg",
".",
"getCheckedOption",
"(",
")",
"if",
"newWhere",
"!=",
"self",
".",
"__where",
":",
"self",
".",
"__where",
"=",
"newWhere",
"self",
".",
"__saveConfiguredWhere",
"(",
")"
] |
Configures the garbage collector plugin
|
[
"Configures",
"the",
"garbage",
"collector",
"plugin"
] |
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
|
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L163-L170
|
244,918
|
SergeySatskiy/cdm-gc-plugin
|
cdmplugins/gc/__init__.py
|
GCPlugin.__getConfiguredWhere
|
def __getConfiguredWhere(self):
"""Provides the saved configured value"""
defaultSettings = {'where': GCPluginConfigDialog.SILENT}
configFile = self.__getConfigFile()
if not os.path.exists(configFile):
values = defaultSettings
else:
values = loadJSON(configFile,
'garbage collector plugin settings',
defaultSettings)
try:
value = values['where']
if value < GCPluginConfigDialog.SILENT or \
value > GCPluginConfigDialog.LOG:
return GCPluginConfigDialog.SILENT
return value
except:
return GCPluginConfigDialog.SILENT
|
python
|
def __getConfiguredWhere(self):
"""Provides the saved configured value"""
defaultSettings = {'where': GCPluginConfigDialog.SILENT}
configFile = self.__getConfigFile()
if not os.path.exists(configFile):
values = defaultSettings
else:
values = loadJSON(configFile,
'garbage collector plugin settings',
defaultSettings)
try:
value = values['where']
if value < GCPluginConfigDialog.SILENT or \
value > GCPluginConfigDialog.LOG:
return GCPluginConfigDialog.SILENT
return value
except:
return GCPluginConfigDialog.SILENT
|
[
"def",
"__getConfiguredWhere",
"(",
"self",
")",
":",
"defaultSettings",
"=",
"{",
"'where'",
":",
"GCPluginConfigDialog",
".",
"SILENT",
"}",
"configFile",
"=",
"self",
".",
"__getConfigFile",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"configFile",
")",
":",
"values",
"=",
"defaultSettings",
"else",
":",
"values",
"=",
"loadJSON",
"(",
"configFile",
",",
"'garbage collector plugin settings'",
",",
"defaultSettings",
")",
"try",
":",
"value",
"=",
"values",
"[",
"'where'",
"]",
"if",
"value",
"<",
"GCPluginConfigDialog",
".",
"SILENT",
"or",
"value",
">",
"GCPluginConfigDialog",
".",
"LOG",
":",
"return",
"GCPluginConfigDialog",
".",
"SILENT",
"return",
"value",
"except",
":",
"return",
"GCPluginConfigDialog",
".",
"SILENT"
] |
Provides the saved configured value
|
[
"Provides",
"the",
"saved",
"configured",
"value"
] |
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
|
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L176-L193
|
244,919
|
ronaldguillen/wave
|
wave/renderers.py
|
JSONRenderer.render
|
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return bytes()
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
if indent is None:
separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS
else:
separators = INDENT_SEPARATORS
ret = json.dumps(
data, cls=self.encoder_class,
indent=indent, ensure_ascii=self.ensure_ascii,
separators=separators
)
# On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
# but if ensure_ascii=False, the return type is underspecified,
# and may (or may not) be unicode.
# On python 3.x json.dumps() returns unicode strings.
if isinstance(ret, six.text_type):
# We always fully escape \u2028 and \u2029 to ensure we output JSON
# that is a strict javascript subset. If bytes were returned
# by json.dumps() then we don't have these characters in any case.
# See: http://timelessrepo.com/json-isnt-a-javascript-subset
ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
return bytes(ret.encode('utf-8'))
return ret
|
python
|
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return bytes()
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
if indent is None:
separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS
else:
separators = INDENT_SEPARATORS
ret = json.dumps(
data, cls=self.encoder_class,
indent=indent, ensure_ascii=self.ensure_ascii,
separators=separators
)
# On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
# but if ensure_ascii=False, the return type is underspecified,
# and may (or may not) be unicode.
# On python 3.x json.dumps() returns unicode strings.
if isinstance(ret, six.text_type):
# We always fully escape \u2028 and \u2029 to ensure we output JSON
# that is a strict javascript subset. If bytes were returned
# by json.dumps() then we don't have these characters in any case.
# See: http://timelessrepo.com/json-isnt-a-javascript-subset
ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
return bytes(ret.encode('utf-8'))
return ret
|
[
"def",
"render",
"(",
"self",
",",
"data",
",",
"accepted_media_type",
"=",
"None",
",",
"renderer_context",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"bytes",
"(",
")",
"renderer_context",
"=",
"renderer_context",
"or",
"{",
"}",
"indent",
"=",
"self",
".",
"get_indent",
"(",
"accepted_media_type",
",",
"renderer_context",
")",
"if",
"indent",
"is",
"None",
":",
"separators",
"=",
"SHORT_SEPARATORS",
"if",
"self",
".",
"compact",
"else",
"LONG_SEPARATORS",
"else",
":",
"separators",
"=",
"INDENT_SEPARATORS",
"ret",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"self",
".",
"encoder_class",
",",
"indent",
"=",
"indent",
",",
"ensure_ascii",
"=",
"self",
".",
"ensure_ascii",
",",
"separators",
"=",
"separators",
")",
"# On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,",
"# but if ensure_ascii=False, the return type is underspecified,",
"# and may (or may not) be unicode.",
"# On python 3.x json.dumps() returns unicode strings.",
"if",
"isinstance",
"(",
"ret",
",",
"six",
".",
"text_type",
")",
":",
"# We always fully escape \\u2028 and \\u2029 to ensure we output JSON",
"# that is a strict javascript subset. If bytes were returned",
"# by json.dumps() then we don't have these characters in any case.",
"# See: http://timelessrepo.com/json-isnt-a-javascript-subset",
"ret",
"=",
"ret",
".",
"replace",
"(",
"'\\u2028'",
",",
"'\\\\u2028'",
")",
".",
"replace",
"(",
"'\\u2029'",
",",
"'\\\\u2029'",
")",
"return",
"bytes",
"(",
"ret",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"ret"
] |
Render `data` into JSON, returning a bytestring.
|
[
"Render",
"data",
"into",
"JSON",
"returning",
"a",
"bytestring",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L86-L118
|
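The \u2028/\u2029 escaping is the subtle part of JSONRenderer.render: with ensure_ascii=False, json.dumps emits those characters raw, and raw U+2028/U+2029 terminate lines in pre-ES2019 JavaScript even though they are legal inside JSON strings. A standalone check:

import json

payload = {'note': u'line\u2028separator inside'}

raw = json.dumps(payload, ensure_ascii=False)
print('\u2028' in raw)  # True: dumps leaves the character unescaped

safe = raw.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
print('\u2028' in safe)  # False: now a strict JavaScript subset
print(json.loads(safe) == payload)  # True: the escaping round-trips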
244,920
|
ronaldguillen/wave
|
wave/renderers.py
|
HTMLFormRenderer.render
|
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render serializer data and return an HTML form, as a string.
"""
form = data.serializer
style = renderer_context.get('style', {})
if 'template_pack' not in style:
style['template_pack'] = self.template_pack
style['renderer'] = self
template_pack = style['template_pack'].strip('/')
template_name = template_pack + '/' + self.base_template
template = loader.get_template(template_name)
context = {
'form': form,
'style': style
}
return template_render(template, context)
|
python
|
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render serializer data and return an HTML form, as a string.
"""
form = data.serializer
style = renderer_context.get('style', {})
if 'template_pack' not in style:
style['template_pack'] = self.template_pack
style['renderer'] = self
template_pack = style['template_pack'].strip('/')
template_name = template_pack + '/' + self.base_template
template = loader.get_template(template_name)
context = {
'form': form,
'style': style
}
return template_render(template, context)
|
[
"def",
"render",
"(",
"self",
",",
"data",
",",
"accepted_media_type",
"=",
"None",
",",
"renderer_context",
"=",
"None",
")",
":",
"form",
"=",
"data",
".",
"serializer",
"style",
"=",
"renderer_context",
".",
"get",
"(",
"'style'",
",",
"{",
"}",
")",
"if",
"'template_pack'",
"not",
"in",
"style",
":",
"style",
"[",
"'template_pack'",
"]",
"=",
"self",
".",
"template_pack",
"style",
"[",
"'renderer'",
"]",
"=",
"self",
"template_pack",
"=",
"style",
"[",
"'template_pack'",
"]",
".",
"strip",
"(",
"'/'",
")",
"template_name",
"=",
"template_pack",
"+",
"'/'",
"+",
"self",
".",
"base_template",
"template",
"=",
"loader",
".",
"get_template",
"(",
"template_name",
")",
"context",
"=",
"{",
"'form'",
":",
"form",
",",
"'style'",
":",
"style",
"}",
"return",
"template_render",
"(",
"template",
",",
"context",
")"
] |
Render serializer data and return an HTML form, as a string.
|
[
"Render",
"serializer",
"data",
"and",
"return",
"an",
"HTML",
"form",
"as",
"a",
"string",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L340-L358
|
244,921
|
ronaldguillen/wave
|
wave/renderers.py
|
BrowsableAPIRenderer.get_content
|
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content
|
python
|
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content
|
[
"def",
"get_content",
"(",
"self",
",",
"renderer",
",",
"data",
",",
"accepted_media_type",
",",
"renderer_context",
")",
":",
"if",
"not",
"renderer",
":",
"return",
"'[No renderers were found]'",
"renderer_context",
"[",
"'indent'",
"]",
"=",
"4",
"content",
"=",
"renderer",
".",
"render",
"(",
"data",
",",
"accepted_media_type",
",",
"renderer_context",
")",
"render_style",
"=",
"getattr",
"(",
"renderer",
",",
"'render_style'",
",",
"'text'",
")",
"assert",
"render_style",
"in",
"[",
"'text'",
",",
"'binary'",
"]",
",",
"'Expected .render_style '",
"'\"text\" or \"binary\", but got \"%s\"'",
"%",
"render_style",
"if",
"render_style",
"==",
"'binary'",
":",
"return",
"'[%d bytes of binary content]'",
"%",
"len",
"(",
"content",
")",
"return",
"content"
] |
Get the content as if it had been rendered by the default
non-documenting renderer.
|
[
"Get",
"the",
"content",
"as",
"if",
"it",
"had",
"been",
"rendered",
"by",
"the",
"default",
"non",
"-",
"documenting",
"renderer",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L388-L406
|
244,922
|
ronaldguillen/wave
|
wave/renderers.py
|
BrowsableAPIRenderer.show_form_for_method
|
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
|
python
|
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
|
[
"def",
"show_form_for_method",
"(",
"self",
",",
"view",
",",
"method",
",",
"request",
",",
"obj",
")",
":",
"if",
"method",
"not",
"in",
"view",
".",
"allowed_methods",
":",
"return",
"# Not a valid method",
"try",
":",
"view",
".",
"check_permissions",
"(",
"request",
")",
"if",
"obj",
"is",
"not",
"None",
":",
"view",
".",
"check_object_permissions",
"(",
"request",
",",
"obj",
")",
"except",
"exceptions",
".",
"APIException",
":",
"return",
"False",
"# Doesn't have permissions",
"return",
"True"
] |
Returns True if a form should be shown for this method.
|
[
"Returns",
"True",
"if",
"a",
"form",
"should",
"be",
"shown",
"for",
"this",
"method",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L408-L421
|
244,923
|
ronaldguillen/wave
|
wave/renderers.py
|
BrowsableAPIRenderer.get_rendered_html_form
|
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
If the View has no associated form, return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
kwargs = {'data': request.data}
except ParseError:
kwargs = {}
existing_serializer = serializer
else:
kwargs = {}
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
has_serializer = getattr(view, 'get_serializer', None)
has_serializer_class = getattr(view, 'serializer_class', None)
if (
(not has_serializer and not has_serializer_class) or
not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
serializer = existing_serializer
else:
if has_serializer:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, **kwargs)
else:
serializer = view.get_serializer(**kwargs)
else:
# at this point we must have a serializer_class
if method in ('PUT', 'PATCH'):
serializer = self._get_serializer(view.serializer_class, view,
request, instance=instance, **kwargs)
else:
serializer = self._get_serializer(view.serializer_class, view,
request, **kwargs)
if hasattr(serializer, 'initial_data'):
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
{'style': {'template_pack': 'rest_framework/horizontal'}}
)
|
python
|
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
If the View has no associated form, return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
kwargs = {'data': request.data}
except ParseError:
kwargs = {}
existing_serializer = serializer
else:
kwargs = {}
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
has_serializer = getattr(view, 'get_serializer', None)
has_serializer_class = getattr(view, 'serializer_class', None)
if (
(not has_serializer and not has_serializer_class) or
not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
serializer = existing_serializer
else:
if has_serializer:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, **kwargs)
else:
serializer = view.get_serializer(**kwargs)
else:
# at this point we must have a serializer_class
if method in ('PUT', 'PATCH'):
serializer = self._get_serializer(view.serializer_class, view,
request, instance=instance, **kwargs)
else:
serializer = self._get_serializer(view.serializer_class, view,
request, **kwargs)
if hasattr(serializer, 'initial_data'):
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
{'style': {'template_pack': 'rest_framework/horizontal'}}
)
|
[
"def",
"get_rendered_html_form",
"(",
"self",
",",
"data",
",",
"view",
",",
"method",
",",
"request",
")",
":",
"# See issue #2089 for refactoring this.",
"serializer",
"=",
"getattr",
"(",
"data",
",",
"'serializer'",
",",
"None",
")",
"if",
"serializer",
"and",
"not",
"getattr",
"(",
"serializer",
",",
"'many'",
",",
"False",
")",
":",
"instance",
"=",
"getattr",
"(",
"serializer",
",",
"'instance'",
",",
"None",
")",
"if",
"isinstance",
"(",
"instance",
",",
"Page",
")",
":",
"instance",
"=",
"None",
"else",
":",
"instance",
"=",
"None",
"# If this is valid serializer data, and the form is for the same",
"# HTTP method as was used in the request then use the existing",
"# serializer instance, rather than dynamically creating a new one.",
"if",
"request",
".",
"method",
"==",
"method",
"and",
"serializer",
"is",
"not",
"None",
":",
"try",
":",
"kwargs",
"=",
"{",
"'data'",
":",
"request",
".",
"data",
"}",
"except",
"ParseError",
":",
"kwargs",
"=",
"{",
"}",
"existing_serializer",
"=",
"serializer",
"else",
":",
"kwargs",
"=",
"{",
"}",
"existing_serializer",
"=",
"None",
"with",
"override_method",
"(",
"view",
",",
"request",
",",
"method",
")",
"as",
"request",
":",
"if",
"not",
"self",
".",
"show_form_for_method",
"(",
"view",
",",
"method",
",",
"request",
",",
"instance",
")",
":",
"return",
"if",
"method",
"in",
"(",
"'DELETE'",
",",
"'OPTIONS'",
")",
":",
"return",
"True",
"# Don't actually need to return a form",
"has_serializer",
"=",
"getattr",
"(",
"view",
",",
"'get_serializer'",
",",
"None",
")",
"has_serializer_class",
"=",
"getattr",
"(",
"view",
",",
"'serializer_class'",
",",
"None",
")",
"if",
"(",
"(",
"not",
"has_serializer",
"and",
"not",
"has_serializer_class",
")",
"or",
"not",
"any",
"(",
"is_form_media_type",
"(",
"parser",
".",
"media_type",
")",
"for",
"parser",
"in",
"view",
".",
"parser_classes",
")",
")",
":",
"return",
"if",
"existing_serializer",
"is",
"not",
"None",
":",
"serializer",
"=",
"existing_serializer",
"else",
":",
"if",
"has_serializer",
":",
"if",
"method",
"in",
"(",
"'PUT'",
",",
"'PATCH'",
")",
":",
"serializer",
"=",
"view",
".",
"get_serializer",
"(",
"instance",
"=",
"instance",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"serializer",
"=",
"view",
".",
"get_serializer",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"# at this point we must have a serializer_class",
"if",
"method",
"in",
"(",
"'PUT'",
",",
"'PATCH'",
")",
":",
"serializer",
"=",
"self",
".",
"_get_serializer",
"(",
"view",
".",
"serializer_class",
",",
"view",
",",
"request",
",",
"instance",
"=",
"instance",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"serializer",
"=",
"self",
".",
"_get_serializer",
"(",
"view",
".",
"serializer_class",
",",
"view",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
"if",
"hasattr",
"(",
"serializer",
",",
"'initial_data'",
")",
":",
"serializer",
".",
"is_valid",
"(",
")",
"form_renderer",
"=",
"self",
".",
"form_renderer_class",
"(",
")",
"return",
"form_renderer",
".",
"render",
"(",
"serializer",
".",
"data",
",",
"self",
".",
"accepted_media_type",
",",
"{",
"'style'",
":",
"{",
"'template_pack'",
":",
"'rest_framework/horizontal'",
"}",
"}",
")"
] |
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
If the View does not have an associated form, return None.
|
[
"Return",
"a",
"string",
"representing",
"a",
"rendered",
"HTML",
"form",
"possibly",
"bound",
"to",
"either",
"the",
"input",
"or",
"output",
"data",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L431-L501
|
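The form-rendering logic above leans on the override_method context manager to temporarily swap the request method while building each form. A minimal sketch of that attribute-swap pattern, assuming nothing beyond the standard library (override_attr and the _DemoView class are hypothetical stand-ins):

from contextlib import contextmanager

@contextmanager
def override_attr(obj, name, value):
    # remember the original value (a sentinel marks "attribute was absent")
    sentinel = object()
    old = getattr(obj, name, sentinel)
    setattr(obj, name, value)
    try:
        yield obj
    finally:
        if old is sentinel:
            delattr(obj, name)
        else:
            setattr(obj, name, old)

class _DemoView:
    method = 'GET'

view = _DemoView()
with override_attr(view, 'method', 'PUT'):
    print(view.method)  # PUT
print(view.method)      # GET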
244,924
|
ronaldguillen/wave
|
wave/renderers.py
|
BrowsableAPIRenderer.get_context
|
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
'description': self.get_description(view, response.status_code),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'filter_form': self.get_filter_form(data, view, request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings
}
return context
|
python
|
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
context = {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
'description': self.get_description(view, response.status_code),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'filter_form': self.get_filter_form(data, view, request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings
}
return context
|
[
"def",
"get_context",
"(",
"self",
",",
"data",
",",
"accepted_media_type",
",",
"renderer_context",
")",
":",
"view",
"=",
"renderer_context",
"[",
"'view'",
"]",
"request",
"=",
"renderer_context",
"[",
"'request'",
"]",
"response",
"=",
"renderer_context",
"[",
"'response'",
"]",
"renderer",
"=",
"self",
".",
"get_default_renderer",
"(",
"view",
")",
"raw_data_post_form",
"=",
"self",
".",
"get_raw_data_form",
"(",
"data",
",",
"view",
",",
"'POST'",
",",
"request",
")",
"raw_data_put_form",
"=",
"self",
".",
"get_raw_data_form",
"(",
"data",
",",
"view",
",",
"'PUT'",
",",
"request",
")",
"raw_data_patch_form",
"=",
"self",
".",
"get_raw_data_form",
"(",
"data",
",",
"view",
",",
"'PATCH'",
",",
"request",
")",
"raw_data_put_or_patch_form",
"=",
"raw_data_put_form",
"or",
"raw_data_patch_form",
"response_headers",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"response",
".",
"items",
"(",
")",
")",
")",
"renderer_content_type",
"=",
"''",
"if",
"renderer",
":",
"renderer_content_type",
"=",
"'%s'",
"%",
"renderer",
".",
"media_type",
"if",
"renderer",
".",
"charset",
":",
"renderer_content_type",
"+=",
"' ;%s'",
"%",
"renderer",
".",
"charset",
"response_headers",
"[",
"'Content-Type'",
"]",
"=",
"renderer_content_type",
"if",
"getattr",
"(",
"view",
",",
"'paginator'",
",",
"None",
")",
"and",
"view",
".",
"paginator",
".",
"display_page_controls",
":",
"paginator",
"=",
"view",
".",
"paginator",
"else",
":",
"paginator",
"=",
"None",
"context",
"=",
"{",
"'content'",
":",
"self",
".",
"get_content",
"(",
"renderer",
",",
"data",
",",
"accepted_media_type",
",",
"renderer_context",
")",
",",
"'view'",
":",
"view",
",",
"'request'",
":",
"request",
",",
"'response'",
":",
"response",
",",
"'description'",
":",
"self",
".",
"get_description",
"(",
"view",
",",
"response",
".",
"status_code",
")",
",",
"'name'",
":",
"self",
".",
"get_name",
"(",
"view",
")",
",",
"'version'",
":",
"VERSION",
",",
"'paginator'",
":",
"paginator",
",",
"'breadcrumblist'",
":",
"self",
".",
"get_breadcrumbs",
"(",
"request",
")",
",",
"'allowed_methods'",
":",
"view",
".",
"allowed_methods",
",",
"'available_formats'",
":",
"[",
"renderer_cls",
".",
"format",
"for",
"renderer_cls",
"in",
"view",
".",
"renderer_classes",
"]",
",",
"'response_headers'",
":",
"response_headers",
",",
"'put_form'",
":",
"self",
".",
"get_rendered_html_form",
"(",
"data",
",",
"view",
",",
"'PUT'",
",",
"request",
")",
",",
"'post_form'",
":",
"self",
".",
"get_rendered_html_form",
"(",
"data",
",",
"view",
",",
"'POST'",
",",
"request",
")",
",",
"'delete_form'",
":",
"self",
".",
"get_rendered_html_form",
"(",
"data",
",",
"view",
",",
"'DELETE'",
",",
"request",
")",
",",
"'options_form'",
":",
"self",
".",
"get_rendered_html_form",
"(",
"data",
",",
"view",
",",
"'OPTIONS'",
",",
"request",
")",
",",
"'filter_form'",
":",
"self",
".",
"get_filter_form",
"(",
"data",
",",
"view",
",",
"request",
")",
",",
"'raw_data_put_form'",
":",
"raw_data_put_form",
",",
"'raw_data_post_form'",
":",
"raw_data_post_form",
",",
"'raw_data_patch_form'",
":",
"raw_data_patch_form",
",",
"'raw_data_put_or_patch_form'",
":",
"raw_data_put_or_patch_form",
",",
"'display_edit_forms'",
":",
"bool",
"(",
"response",
".",
"status_code",
"!=",
"403",
")",
",",
"'api_settings'",
":",
"api_settings",
"}",
"return",
"context"
] |
Returns the context used to render.
|
[
"Returns",
"the",
"context",
"used",
"to",
"render",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L607-L665
|
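The header-assembly step in get_context is easy to isolate: sort the response headers into an OrderedDict, then overwrite Content-Type with the negotiated renderer's media type and charset. A self-contained sketch with hypothetical header values:

from collections import OrderedDict

# Hypothetical header mapping standing in for the response object above.
headers = {'Vary': 'Accept', 'Allow': 'GET, POST', 'Content-Type': ''}
response_headers = OrderedDict(sorted(headers.items()))

renderer_media_type, renderer_charset = 'application/json', 'utf-8'
renderer_content_type = '%s' % renderer_media_type
if renderer_charset:
    renderer_content_type += ' ;%s' % renderer_charset
response_headers['Content-Type'] = renderer_content_type
print(response_headers['Content-Type'])  # application/json ;utf-8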
244,925
|
radjkarl/fancyTools
|
fancytools/os/setup.py
|
setup
|
def setup(package, **kwargs):
"""a template for the python setup.py installer routine
* take setup information from the packages __init__.py file
* this way these informations, like...
- __email__
- __version__
- __depencies__
are still available after installation
* exclude /tests*
* create scripts from all files in /bin
* create the long description from
- /README.rst
- /CHANGES.rst
- /AUTHORS.rst
* remove /build at the end
"""
def read(*paths):
"""Build a file path from *paths* and return the contents."""
p = os.path.join(*paths)
if os.path.exists(p):
with open(p, 'r') as f:
return f.read()
return ''
setuptoolsSetup(
name=package.__name__,
version=package.__version__,
author=package.__author__,
author_email=package.__email__,
url=package.__url__,
license=package.__license__,
install_requires=package.__depencies__,
classifiers=package.__classifiers__,
description=package.__description__,
packages=find_packages(exclude=['tests*']),
include_package_data=True,
scripts=[] if not os.path.exists('bin') else [
os.path.join('bin', x) for x in os.listdir('bin')],
long_description=(
read('README.rst') + '\n\n' +
read('CHANGES.rst') + '\n\n' +
read('AUTHORS.rst')),
**kwargs
)
# remove the build
    # else old and nonexistent files could come again in the installed pkg
mainPath = os.path.abspath(os.path.dirname(__file__))
bPath = os.path.join(mainPath, 'build')
if os.path.exists(bPath):
shutil.rmtree(bPath)
|
python
|
def setup(package, **kwargs):
"""a template for the python setup.py installer routine
    * take setup information from the package's __init__.py file
    * this way this information, like...
- __email__
- __version__
- __depencies__
are still available after installation
* exclude /tests*
* create scripts from all files in /bin
* create the long description from
- /README.rst
- /CHANGES.rst
- /AUTHORS.rst
* remove /build at the end
"""
def read(*paths):
"""Build a file path from *paths* and return the contents."""
p = os.path.join(*paths)
if os.path.exists(p):
with open(p, 'r') as f:
return f.read()
return ''
setuptoolsSetup(
name=package.__name__,
version=package.__version__,
author=package.__author__,
author_email=package.__email__,
url=package.__url__,
license=package.__license__,
install_requires=package.__depencies__,
classifiers=package.__classifiers__,
description=package.__description__,
packages=find_packages(exclude=['tests*']),
include_package_data=True,
scripts=[] if not os.path.exists('bin') else [
os.path.join('bin', x) for x in os.listdir('bin')],
long_description=(
read('README.rst') + '\n\n' +
read('CHANGES.rst') + '\n\n' +
read('AUTHORS.rst')),
**kwargs
)
# remove the build
    # else old and nonexistent files could come again in the installed pkg
mainPath = os.path.abspath(os.path.dirname(__file__))
bPath = os.path.join(mainPath, 'build')
if os.path.exists(bPath):
shutil.rmtree(bPath)
|
[
"def",
"setup",
"(",
"package",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"read",
"(",
"*",
"paths",
")",
":",
"\"\"\"Build a file path from *paths* and return the contents.\"\"\"",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"paths",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
":",
"with",
"open",
"(",
"p",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
"return",
"''",
"setuptoolsSetup",
"(",
"name",
"=",
"package",
".",
"__name__",
",",
"version",
"=",
"package",
".",
"__version__",
",",
"author",
"=",
"package",
".",
"__author__",
",",
"author_email",
"=",
"package",
".",
"__email__",
",",
"url",
"=",
"package",
".",
"__url__",
",",
"license",
"=",
"package",
".",
"__license__",
",",
"install_requires",
"=",
"package",
".",
"__depencies__",
",",
"classifiers",
"=",
"package",
".",
"__classifiers__",
",",
"description",
"=",
"package",
".",
"__description__",
",",
"packages",
"=",
"find_packages",
"(",
"exclude",
"=",
"[",
"'tests*'",
"]",
")",
",",
"include_package_data",
"=",
"True",
",",
"scripts",
"=",
"[",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'bin'",
")",
"else",
"[",
"os",
".",
"path",
".",
"join",
"(",
"'bin'",
",",
"x",
")",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"'bin'",
")",
"]",
",",
"long_description",
"=",
"(",
"read",
"(",
"'README.rst'",
")",
"+",
"'\\n\\n'",
"+",
"read",
"(",
"'CHANGES.rst'",
")",
"+",
"'\\n\\n'",
"+",
"read",
"(",
"'AUTHORS.rst'",
")",
")",
",",
"*",
"*",
"kwargs",
")",
"# remove the build",
"# else old and notexistent files could come again in the installed pkg",
"mainPath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"bPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"mainPath",
",",
"'build'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"bPath",
")",
":",
"shutil",
".",
"rmtree",
"(",
"bPath",
")"
] |
a template for the python setup.py installer routine
* take setup information from the package's __init__.py file
* this way this information, like...
- __email__
- __version__
- __depencies__
are still available after installation
* exclude /tests*
* create scripts from all files in /bin
* create the long description from
- /README.rst
- /CHANGES.rst
- /AUTHORS.rst
* remove /build at the end
|
[
"a",
"template",
"for",
"the",
"python",
"setup",
".",
"py",
"installer",
"routine"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/os/setup.py#L9-L62
|
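The nested read helper is the reusable part of the routine above: missing files degrade to empty strings, so assembling the long description never raises, even in a bare checkout. A standalone version:

import os

def read(*paths):
    """Build a file path from *paths* and return the contents."""
    p = os.path.join(*paths)
    if os.path.exists(p):
        with open(p, 'r') as f:
            return f.read()
    return ''

# each missing file contributes an empty string, so concatenation is safe
long_description = (
    read('README.rst') + '\n\n' +
    read('CHANGES.rst') + '\n\n' +
    read('AUTHORS.rst'))
print(repr(long_description[:60]))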
244,926
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/config/YamlConfigReader.py
|
YamlConfigReader._read_object
|
def _read_object(self, correlation_id, parameters):
"""
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
        :param parameters: values to parameterize the configuration.
:return: a JSON object with configuration.
"""
path = self.get_path()
        if path is None:
raise ConfigException(correlation_id, "NO_PATH", "Missing config file path")
if not os.path.isfile(path):
raise FileException(correlation_id, 'FILE_NOT_FOUND', 'Config file was not found at ' + path)
try:
with open(path, 'r') as file:
config = file.read()
config = self._parameterize(config, parameters)
return yaml.load(config)
except Exception as ex:
raise FileException(
correlation_id,
"READ_FAILED",
"Failed reading configuration " + path + ": " + str(ex)
).with_details("path", path).with_cause(ex)
|
python
|
def _read_object(self, correlation_id, parameters):
"""
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
        :param parameters: values to parameterize the configuration.
:return: a JSON object with configuration.
"""
path = self.get_path()
        if path is None:
raise ConfigException(correlation_id, "NO_PATH", "Missing config file path")
if not os.path.isfile(path):
raise FileException(correlation_id, 'FILE_NOT_FOUND', 'Config file was not found at ' + path)
try:
with open(path, 'r') as file:
config = file.read()
config = self._parameterize(config, parameters)
return yaml.load(config)
except Exception as ex:
raise FileException(
correlation_id,
"READ_FAILED",
"Failed reading configuration " + path + ": " + str(ex)
).with_details("path", path).with_cause(ex)
|
[
"def",
"_read_object",
"(",
"self",
",",
"correlation_id",
",",
"parameters",
")",
":",
"path",
"=",
"self",
".",
"get_path",
"(",
")",
"if",
"path",
"==",
"None",
":",
"raise",
"ConfigException",
"(",
"correlation_id",
",",
"\"NO_PATH\"",
",",
"\"Missing config file path\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"raise",
"FileException",
"(",
"correlation_id",
",",
"'FILE_NOT_FOUND'",
",",
"'Config file was not found at '",
"+",
"path",
")",
"try",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"file",
":",
"config",
"=",
"file",
".",
"read",
"(",
")",
"config",
"=",
"self",
".",
"_parameterize",
"(",
"config",
",",
"parameters",
")",
"return",
"yaml",
".",
"load",
"(",
"config",
")",
"except",
"Exception",
"as",
"ex",
":",
"raise",
"FileException",
"(",
"correlation_id",
",",
"\"READ_FAILED\"",
",",
"\"Failed reading configuration \"",
"+",
"path",
"+",
"\": \"",
"+",
"str",
"(",
"ex",
")",
")",
".",
"with_details",
"(",
"\"path\"",
",",
"path",
")",
".",
"with_cause",
"(",
"ex",
")"
] |
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameterize the configuration.
:return: a JSON object with configuration.
|
[
"Reads",
"configuration",
"file",
"parameterizes",
"its",
"content",
"and",
"converts",
"it",
"into",
"JSON",
"object",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/config/YamlConfigReader.py#L48-L76
|
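A minimal sketch of the parameterize-then-parse flow above, assuming PyYAML is installed. The {{KEY}} substitution is only a stand-in for the reader's _parameterize step, and safe_load is used instead of plain yaml.load to avoid arbitrary object construction on untrusted input:

import yaml  # PyYAML, assumed installed

# hypothetical template; {{KEY}} replacement stands in for _parameterize
template = 'host: {{HOST}}\nport: {{PORT}}'
parameters = {'HOST': 'localhost', 'PORT': '8080'}
for key, value in parameters.items():
    template = template.replace('{{%s}}' % key, value)

# safe_load restricts parsing to plain tags (str, int, dict, list, ...)
config = yaml.safe_load(template)
print(config)  # {'host': 'localhost', 'port': 8080}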
244,927
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/config/YamlConfigReader.py
|
YamlConfigReader.read_config
|
def read_config(self, correlation_id, parameters):
"""
        Reads configuration and parameterizes it with given values.
        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param parameters: values to parameterize the configuration or null to skip parameterization.
:return: ConfigParams configuration.
"""
value = self._read_object(correlation_id, parameters)
return ConfigParams.from_value(value)
|
python
|
def read_config(self, correlation_id, parameters):
"""
        Reads configuration and parameterizes it with given values.
        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param parameters: values to parameterize the configuration or null to skip parameterization.
:return: ConfigParams configuration.
"""
value = self._read_object(correlation_id, parameters)
return ConfigParams.from_value(value)
|
[
"def",
"read_config",
"(",
"self",
",",
"correlation_id",
",",
"parameters",
")",
":",
"value",
"=",
"self",
".",
"_read_object",
"(",
"correlation_id",
",",
"parameters",
")",
"return",
"ConfigParams",
".",
"from_value",
"(",
"value",
")"
] |
Reads configuration and parameterizes it with given values.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameterize the configuration or null to skip parameterization.
:return: ConfigParams configuration.
|
[
"Reads",
"configuration",
"and",
"parameterize",
"it",
"with",
"given",
"values",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/config/YamlConfigReader.py#L78-L89
|
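ConfigParams.from_value flattens the parsed value into dotted keys; a rough stand-in for that conversion (the exact pip-services behavior may differ, so treat flatten below as illustrative):

def flatten(value, prefix=''):
    # join nested dictionary keys with dots into a flat parameter map
    params = {}
    for key, val in value.items():
        path = '%s.%s' % (prefix, key) if prefix else key
        if isinstance(val, dict):
            params.update(flatten(val, path))
        else:
            params[path] = val
    return params

print(flatten({'connection': {'host': 'localhost', 'port': 8080}}))
# {'connection.host': 'localhost', 'connection.port': 8080}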
244,928
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/config/YamlConfigReader.py
|
YamlConfigReader._read_config
|
def _read_config(correlation_id, path, parameters):
"""
        Reads configuration from a file, parameterizes it with given values and returns a new ConfigParams object.
        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param path: a path to configuration file.
        :param parameters: values to parameterize the configuration.
:return: ConfigParams configuration.
"""
value = YamlConfigReader(path)._read_object(correlation_id, parameters)
return ConfigParams.from_value(value)
|
python
|
def _read_config(correlation_id, path, parameters):
"""
        Reads configuration from a file, parameterizes it with given values and returns a new ConfigParams object.
        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param path: a path to configuration file.
        :param parameters: values to parameterize the configuration.
:return: ConfigParams configuration.
"""
value = YamlConfigReader(path)._read_object(correlation_id, parameters)
return ConfigParams.from_value(value)
|
[
"def",
"_read_config",
"(",
"correlation_id",
",",
"path",
",",
"parameters",
")",
":",
"value",
"=",
"YamlConfigReader",
"(",
"path",
")",
".",
"_read_object",
"(",
"correlation_id",
",",
"parameters",
")",
"return",
"ConfigParams",
".",
"from_value",
"(",
"value",
")"
] |
Reads configuration from a file, parameterizes it with given values and returns a new ConfigParams object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param path: a path to configuration file.
:param parameters: values to parameterize the configuration.
:return: ConfigParams configuration.
|
[
"Reads",
"configuration",
"from",
"a",
"file",
"parameterize",
"it",
"with",
"given",
"values",
"and",
"returns",
"a",
"new",
"ConfigParams",
"object",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/config/YamlConfigReader.py#L107-L120
|
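The FileException chaining seen in _read_object (.with_details(...).with_cause(...)) is a fluent-builder pattern on exceptions: each setter returns self so details and cause attach in one expression. A minimal self-contained version, with ChainableError as a hypothetical stand-in:

class ChainableError(Exception):
    """Hypothetical stand-in for FileException's fluent interface."""
    def __init__(self, correlation_id, code, message):
        super().__init__(message)
        self.correlation_id, self.code = correlation_id, code
        self.details, self.cause = {}, None
    def with_details(self, key, value):  # returns self so calls chain
        self.details[key] = value
        return self
    def with_cause(self, cause):
        self.cause = cause
        return self

try:
    raise ChainableError('123', 'READ_FAILED', 'boom').with_details('path', 'a.yml')
except ChainableError as ex:
    print(ex.code, ex.details)  # READ_FAILED {'path': 'a.yml'}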
244,929
|
itsnauman/jumprun
|
jumprun.py
|
JumpRun.run
|
def run(self):
"""
Perform the specified action
"""
if self.args['add']:
self.action_add()
elif self.args['rm']:
self.action_rm()
elif self.args['show']:
self.action_show()
elif self.args['rename']:
self.action_rename()
else:
self.action_run_command()
|
python
|
def run(self):
"""
Perform the specified action
"""
if self.args['add']:
self.action_add()
elif self.args['rm']:
self.action_rm()
elif self.args['show']:
self.action_show()
elif self.args['rename']:
self.action_rename()
else:
self.action_run_command()
|
[
"def",
"run",
"(",
"self",
")",
":",
"if",
"self",
".",
"args",
"[",
"'add'",
"]",
":",
"self",
".",
"action_add",
"(",
")",
"elif",
"self",
".",
"args",
"[",
"'rm'",
"]",
":",
"self",
".",
"action_rm",
"(",
")",
"elif",
"self",
".",
"args",
"[",
"'show'",
"]",
":",
"self",
".",
"action_show",
"(",
")",
"elif",
"self",
".",
"args",
"[",
"'rename'",
"]",
":",
"self",
".",
"action_rename",
"(",
")",
"else",
":",
"self",
".",
"action_run_command",
"(",
")"
] |
Perform the specified action
|
[
"Perform",
"the",
"specified",
"action"
] |
469436720533e9a601226ec1414f294d94d68a53
|
https://github.com/itsnauman/jumprun/blob/469436720533e9a601226ec1414f294d94d68a53/jumprun.py#L95-L113
|
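The run dispatcher above is a flag-to-action table over a docopt-style dict. The same shape written as data, with a for/else fallback standing in for the default branch (the args dict below is hypothetical):

# hypothetical docopt-style argument dict
args = {'add': False, 'rm': False, 'show': True, 'rename': False}

actions = [('add', lambda: print('adding shortcut')),
           ('rm', lambda: print('removing shortcut')),
           ('show', lambda: print('showing shortcuts')),
           ('rename', lambda: print('renaming shortcut'))]
for flag, action in actions:
    if args.get(flag):
        action()
        break
else:
    # mirrors the action_run_command() fallback
    print('running stored command')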
244,930
|
itsnauman/jumprun
|
jumprun.py
|
JumpRun.init_db
|
def init_db(self):
"""
Init database and prepare tables
"""
# database file
db_path = self.get_data_file("data.sqlite")
        # connect and create cursor
self.db = sqlite3.connect(db_path)
self.cursor = self.db.cursor()
# prep tables
self.db_exec('''
CREATE TABLE IF NOT EXISTS shortcuts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
path TEXT NOT NULL,
command TEXT NOT NULL
)
''')
|
python
|
def init_db(self):
"""
Init database and prepare tables
"""
# database file
db_path = self.get_data_file("data.sqlite")
        # connect and create cursor
self.db = sqlite3.connect(db_path)
self.cursor = self.db.cursor()
# prep tables
self.db_exec('''
CREATE TABLE IF NOT EXISTS shortcuts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
path TEXT NOT NULL,
command TEXT NOT NULL
)
''')
|
[
"def",
"init_db",
"(",
"self",
")",
":",
"# database file",
"db_path",
"=",
"self",
".",
"get_data_file",
"(",
"\"data.sqlite\"",
")",
"# comect and create cursor",
"self",
".",
"db",
"=",
"sqlite3",
".",
"connect",
"(",
"db_path",
")",
"self",
".",
"cursor",
"=",
"self",
".",
"db",
".",
"cursor",
"(",
")",
"# prep tables",
"self",
".",
"db_exec",
"(",
"'''\n CREATE TABLE IF NOT EXISTS shortcuts (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT NOT NULL,\n path TEXT NOT NULL,\n command TEXT NOT NULL\n )\n '''",
")"
] |
Init database and prepare tables
|
[
"Init",
"database",
"and",
"prepare",
"tables"
] |
469436720533e9a601226ec1414f294d94d68a53
|
https://github.com/itsnauman/jumprun/blob/469436720533e9a601226ec1414f294d94d68a53/jumprun.py#L121-L141
|
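A self-contained version of the table bootstrap above, pointed at an in-memory database instead of the data.sqlite file so it can run anywhere:

import sqlite3

# an in-memory database replaces the on-disk data.sqlite file
db = sqlite3.connect(':memory:')
cursor = db.cursor()
cursor.execute('''
    CREATE TABLE IF NOT EXISTS shortcuts (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        path TEXT NOT NULL,
        command TEXT NOT NULL
    )
''')
cursor.execute('INSERT INTO shortcuts (name, path, command) VALUES (?, ?, ?)',
               ('demo', '/tmp', 'python demo.py'))
db.commit()
print(cursor.execute('SELECT name, command FROM shortcuts').fetchall())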
244,931
|
itsnauman/jumprun
|
jumprun.py
|
JumpRun.shortcut_str
|
def shortcut_str(self, path, cmd):
"""
Get a string with colors describing a shortcut
"""
s = colored('| path = ', 'cyan') + colored(path, 'yellow') + '\n' \
+ colored('| cmd = ', 'cyan') + \
colored(cmd, 'green', attrs=['bold'])
return s
|
python
|
def shortcut_str(self, path, cmd):
"""
Get a string with colors describing a shortcut
"""
s = colored('| path = ', 'cyan') + colored(path, 'yellow') + '\n' \
+ colored('| cmd = ', 'cyan') + \
colored(cmd, 'green', attrs=['bold'])
return s
|
[
"def",
"shortcut_str",
"(",
"self",
",",
"path",
",",
"cmd",
")",
":",
"s",
"=",
"colored",
"(",
"'| path = '",
",",
"'cyan'",
")",
"+",
"colored",
"(",
"path",
",",
"'yellow'",
")",
"+",
"'\\n'",
"+",
"colored",
"(",
"'| cmd = '",
",",
"'cyan'",
")",
"+",
"colored",
"(",
"cmd",
",",
"'green'",
",",
"attrs",
"=",
"[",
"'bold'",
"]",
")",
"return",
"s"
] |
Get a string with colors describing a shortcut
|
[
"Get",
"a",
"string",
"with",
"colors",
"describing",
"a",
"shortcut"
] |
469436720533e9a601226ec1414f294d94d68a53
|
https://github.com/itsnauman/jumprun/blob/469436720533e9a601226ec1414f294d94d68a53/jumprun.py#L191-L200
|
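The method relies on termcolor's colored helper; reproducing the two-line description outside the class (the path and command values below are hypothetical):

from termcolor import colored  # assumes the termcolor package is installed

path, cmd = '/home/user/scripts', 'python hello.py'  # hypothetical values
s = colored('| path = ', 'cyan') + colored(path, 'yellow') + '\n' \
    + colored('| cmd = ', 'cyan') + colored(cmd, 'green', attrs=['bold'])
print(s)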
244,932
|
fogcitymarathoner/s3_mysql_backup
|
s3_mysql_backup/scripts/delete_bucket.py
|
delete_bucket
|
def delete_bucket():
"""
Delete S3 Bucket
"""
    args = parser.parse_args()
s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name)().delete()
|
python
|
def delete_bucket():
"""
Delete S3 Bucket
"""
    args = parser.parse_args()
s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name)().delete()
|
[
"def",
"delete_bucket",
"(",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"s3_bucket",
"(",
"args",
".",
"aws_access_key_id",
",",
"args",
".",
"aws_secret_access_key",
",",
"args",
".",
"bucket_name",
")",
"(",
")",
".",
"delete",
"(",
")"
] |
Delete S3 Bucket
|
[
"Delete",
"S3",
"Bucket"
] |
8a0fb3e51a7b873eb4287d4954548a0dbab0e734
|
https://github.com/fogcitymarathoner/s3_mysql_backup/blob/8a0fb3e51a7b873eb4287d4954548a0dbab0e734/s3_mysql_backup/scripts/delete_bucket.py#L13-L18
|
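As fixed above, parse_args must be called, not merely referenced. A correct argparse flow for the same flags (argument names and sample values are hypothetical; an explicit argv list keeps the example runnable, while real usage would call parser.parse_args() with no arguments to read sys.argv):

import argparse

parser = argparse.ArgumentParser(description='Delete an S3 bucket')
parser.add_argument('--aws-access-key-id', required=True)
parser.add_argument('--aws-secret-access-key', required=True)
parser.add_argument('--bucket-name', required=True)

args = parser.parse_args(['--aws-access-key-id', 'AKIAEXAMPLE',
                          '--aws-secret-access-key', 'secret',
                          '--bucket-name', 'demo-bucket'])
print(args.bucket_name)  # demo-bucket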
244,933
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.check_instance_status
|
def check_instance_status(self, instance_id, wait=True):
'''
a method to wait until AWS instance reports an OK status
:param instance_id: string of instance id on AWS
:param wait: [optional] boolean to wait for instance while initializing
        :return: string reporting status of instance
'''
title = '%s.check_instance_status' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# notify status check
self.iam.printer('Querying AWS region %s for status of instance %s.' % (self.iam.region_name, instance_id))
# check state of instance
self.iam.printer_on = False
self.check_instance_state(instance_id)
self.iam.printer_on = True
# check instance status
response = self.connection.describe_instance_status(
InstanceIds=[ instance_id ]
)
if not response['InstanceStatuses']:
from time import sleep
            from timeit import default_timer as timer
sleep(1)
response = self.connection.describe_instance_status(
InstanceIds=[ instance_id ]
)
self.iam.printer(response)
instance_status = response['InstanceStatuses'][0]['InstanceStatus']['Status']
# wait for instance status to be ok
if instance_status != 'ok' and wait:
from time import sleep
            from timeit import default_timer as timer
self.iam.printer('Waiting for initialization of instance %s to stop' % instance_id, flush=True)
delay = 3
status_timeout = 0
while instance_status != 'ok':
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
response = self.connection.describe_instance_status(
InstanceIds=[ instance_id ]
)
t4 = timer()
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
status_timeout += 1
if status_timeout > 300:
raise Exception('\nTimeout. Failure initializing instance %s on AWS in less than 15min' % instance_id)
instance_status = response['InstanceStatuses'][0]['InstanceStatus']['Status']
print(' done.')
# report outcome
self.iam.printer('Instance %s is %s.' % (instance_id, instance_status))
return instance_status
|
python
|
def check_instance_status(self, instance_id, wait=True):
'''
a method to wait until AWS instance reports an OK status
:param instance_id: string of instance id on AWS
:param wait: [optional] boolean to wait for instance while initializing
        :return: string reporting status of instance
'''
title = '%s.check_instance_status' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# notify status check
self.iam.printer('Querying AWS region %s for status of instance %s.' % (self.iam.region_name, instance_id))
# check state of instance
self.iam.printer_on = False
self.check_instance_state(instance_id)
self.iam.printer_on = True
# check instance status
response = self.connection.describe_instance_status(
InstanceIds=[ instance_id ]
)
if not response['InstanceStatuses']:
from time import sleep
            from timeit import default_timer as timer
sleep(1)
response = self.connection.describe_instance_status(
InstanceIds=[ instance_id ]
)
self.iam.printer(response)
instance_status = response['InstanceStatuses'][0]['InstanceStatus']['Status']
# wait for instance status to be ok
if instance_status != 'ok' and wait:
from time import sleep
            from timeit import default_timer as timer
self.iam.printer('Waiting for initialization of instance %s to stop' % instance_id, flush=True)
delay = 3
status_timeout = 0
while instance_status != 'ok':
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
response = self.connection.describe_instance_status(
InstanceIds=[ instance_id ]
)
t4 = timer()
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
status_timeout += 1
if status_timeout > 300:
raise Exception('\nTimeout. Failure initializing instance %s on AWS in less than 15min' % instance_id)
instance_status = response['InstanceStatuses'][0]['InstanceStatus']['Status']
print(' done.')
# report outcome
self.iam.printer('Instance %s is %s.' % (instance_id, instance_status))
return instance_status
|
[
"def",
"check_instance_status",
"(",
"self",
",",
"instance_id",
",",
"wait",
"=",
"True",
")",
":",
"title",
"=",
"'%s.check_instance_status'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'instance_id'",
":",
"instance_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# notify status check",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for status of instance %s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"instance_id",
")",
")",
"# check state of instance",
"self",
".",
"iam",
".",
"printer_on",
"=",
"False",
"self",
".",
"check_instance_state",
"(",
"instance_id",
")",
"self",
".",
"iam",
".",
"printer_on",
"=",
"True",
"# check instance status",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instance_status",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"if",
"not",
"response",
"[",
"'InstanceStatuses'",
"]",
":",
"from",
"time",
"import",
"sleep",
"from",
"timeit",
"import",
"timeit",
"as",
"timer",
"sleep",
"(",
"1",
")",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instance_status",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"response",
")",
"instance_status",
"=",
"response",
"[",
"'InstanceStatuses'",
"]",
"[",
"0",
"]",
"[",
"'InstanceStatus'",
"]",
"[",
"'Status'",
"]",
"# wait for instance status to be ok",
"if",
"instance_status",
"!=",
"'ok'",
"and",
"wait",
":",
"from",
"time",
"import",
"sleep",
"from",
"timeit",
"import",
"timeit",
"as",
"timer",
"self",
".",
"iam",
".",
"printer",
"(",
"'Waiting for initialization of instance %s to stop'",
"%",
"instance_id",
",",
"flush",
"=",
"True",
")",
"delay",
"=",
"3",
"status_timeout",
"=",
"0",
"while",
"instance_status",
"!=",
"'ok'",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'.'",
",",
"flush",
"=",
"True",
")",
"sleep",
"(",
"delay",
")",
"t3",
"=",
"timer",
"(",
")",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instance_status",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"t4",
"=",
"timer",
"(",
")",
"response_time",
"=",
"t4",
"-",
"t3",
"if",
"3",
"-",
"response_time",
">",
"0",
":",
"delay",
"=",
"3",
"-",
"response_time",
"else",
":",
"delay",
"=",
"0",
"status_timeout",
"+=",
"1",
"if",
"status_timeout",
">",
"300",
":",
"raise",
"Exception",
"(",
"'\\nTimeout. Failure initializing instance %s on AWS in less than 15min'",
"%",
"instance_id",
")",
"instance_status",
"=",
"response",
"[",
"'InstanceStatuses'",
"]",
"[",
"0",
"]",
"[",
"'InstanceStatus'",
"]",
"[",
"'Status'",
"]",
"print",
"(",
"' done.'",
")",
"# report outcome",
"self",
".",
"iam",
".",
"printer",
"(",
"'Instance %s is %s.'",
"%",
"(",
"instance_id",
",",
"instance_status",
")",
")",
"return",
"instance_status"
] |
a method to wait until AWS instance reports an OK status
:param instance_id: string of instance id on AWS
:param wait: [optional] boolean to wait for instance while initializing
:return: string reporting status of instance
|
[
"a",
"method",
"to",
"wait",
"until",
"AWS",
"instance",
"reports",
"an",
"OK",
"status"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L162-L233
|
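The polling loop above keeps each cycle close to three seconds by subtracting the time the status call itself took (note the corrected default_timer import; timeit.timeit would benchmark a statement rather than read the clock). A generic, runnable version of that adaptive delay, with poll_until and the fake check as illustrative stand-ins:

from time import sleep
from timeit import default_timer as timer

def poll_until(check, interval=3.0, max_attempts=300):
    # keep each cycle close to `interval` by subtracting the time
    # the status check itself took
    attempts, delay = 0, 0.0
    while True:
        sleep(delay)
        t0 = timer()
        status = check()
        elapsed = timer() - t0
        if status == 'ok':
            return status
        attempts += 1
        if attempts > max_attempts:
            raise Exception('Timeout waiting for ok status')
        delay = max(interval - elapsed, 0)

# hypothetical check that reports 'ok' on the third call
calls = iter(['initializing', 'initializing', 'ok'])
print(poll_until(lambda: next(calls), interval=0.01))  # ok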
244,934
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.list_instances
|
def list_instances(self, tag_values=None):
'''
a method to retrieve the list of instances on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of strings with instance AWS ids
'''
title = '%s.list_instances' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = {}
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request instance details from AWS
self.iam.printer('Querying AWS region %s for instances%s.' % (self.iam.region_name, tag_text))
instance_list = []
try:
if tag_values:
response = self.connection.describe_instances(**kw_args)
else:
response = self.connection.describe_instances()
except:
raise AWSConnectionError(title)
# repeat request if any instances are currently pending
response_list = response['Reservations']
for instance in response_list:
instance_info = instance['Instances'][0]
if instance_info['State']['Name'] == 'pending':
self.check_instance_state(instance_info['InstanceId'])
try:
if tag_values:
response = self.connection.describe_instances(**kw_args)
else:
response = self.connection.describe_instances()
except:
raise AWSConnectionError(title)
response_list = response['Reservations']
# populate list of instances with instance details
for instance in response_list:
instance_info = instance['Instances'][0]
state_name = instance_info['State']['Name']
if state_name not in ('shutting-down', 'terminated'):
instance_list.append(instance_info['InstanceId'])
# report results and return details
if instance_list:
print_out = 'Found instance'
if len(instance_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(instance_list)
self.iam.printer(print_out)
else:
self.iam.printer('No instances found.')
return instance_list
|
python
|
def list_instances(self, tag_values=None):
'''
a method to retrieve the list of instances on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of strings with instance AWS ids
'''
title = '%s.list_instances' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = {}
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request instance details from AWS
self.iam.printer('Querying AWS region %s for instances%s.' % (self.iam.region_name, tag_text))
instance_list = []
try:
if tag_values:
response = self.connection.describe_instances(**kw_args)
else:
response = self.connection.describe_instances()
except:
raise AWSConnectionError(title)
# repeat request if any instances are currently pending
response_list = response['Reservations']
for instance in response_list:
instance_info = instance['Instances'][0]
if instance_info['State']['Name'] == 'pending':
self.check_instance_state(instance_info['InstanceId'])
try:
if tag_values:
response = self.connection.describe_instances(**kw_args)
else:
response = self.connection.describe_instances()
except:
raise AWSConnectionError(title)
response_list = response['Reservations']
# populate list of instances with instance details
for instance in response_list:
instance_info = instance['Instances'][0]
state_name = instance_info['State']['Name']
if state_name not in ('shutting-down', 'terminated'):
instance_list.append(instance_info['InstanceId'])
# report results and return details
if instance_list:
print_out = 'Found instance'
if len(instance_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(instance_list)
self.iam.printer(print_out)
else:
self.iam.printer('No instances found.')
return instance_list
|
[
"def",
"list_instances",
"(",
"self",
",",
"tag_values",
"=",
"None",
")",
":",
"title",
"=",
"'%s.list_instances'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'tag_values'",
":",
"tag_values",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# add tags to method arguments",
"kw_args",
"=",
"{",
"}",
"tag_text",
"=",
"''",
"if",
"tag_values",
":",
"kw_args",
"=",
"{",
"'Filters'",
":",
"[",
"{",
"'Name'",
":",
"'tag-value'",
",",
"'Values'",
":",
"tag_values",
"}",
"]",
"}",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"plural_value",
"=",
"''",
"if",
"len",
"(",
"tag_values",
")",
">",
"1",
":",
"plural_value",
"=",
"'s'",
"tag_text",
"=",
"' with tag value%s %s'",
"%",
"(",
"plural_value",
",",
"join_words",
"(",
"tag_values",
")",
")",
"# request instance details from AWS",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for instances%s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"tag_text",
")",
")",
"instance_list",
"=",
"[",
"]",
"try",
":",
"if",
"tag_values",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instances",
"(",
"*",
"*",
"kw_args",
")",
"else",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instances",
"(",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# repeat request if any instances are currently pending",
"response_list",
"=",
"response",
"[",
"'Reservations'",
"]",
"for",
"instance",
"in",
"response_list",
":",
"instance_info",
"=",
"instance",
"[",
"'Instances'",
"]",
"[",
"0",
"]",
"if",
"instance_info",
"[",
"'State'",
"]",
"[",
"'Name'",
"]",
"==",
"'pending'",
":",
"self",
".",
"check_instance_state",
"(",
"instance_info",
"[",
"'InstanceId'",
"]",
")",
"try",
":",
"if",
"tag_values",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instances",
"(",
"*",
"*",
"kw_args",
")",
"else",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instances",
"(",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"response",
"[",
"'Reservations'",
"]",
"# populate list of instances with instance details",
"for",
"instance",
"in",
"response_list",
":",
"instance_info",
"=",
"instance",
"[",
"'Instances'",
"]",
"[",
"0",
"]",
"state_name",
"=",
"instance_info",
"[",
"'State'",
"]",
"[",
"'Name'",
"]",
"if",
"state_name",
"not",
"in",
"(",
"'shutting-down'",
",",
"'terminated'",
")",
":",
"instance_list",
".",
"append",
"(",
"instance_info",
"[",
"'InstanceId'",
"]",
")",
"# report results and return details",
"if",
"instance_list",
":",
"print_out",
"=",
"'Found instance'",
"if",
"len",
"(",
"instance_list",
")",
">",
"1",
":",
"print_out",
"+=",
"'s'",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"print_out",
"+=",
"' %s.'",
"%",
"join_words",
"(",
"instance_list",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"print_out",
")",
"else",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'No instances found.'",
")",
"return",
"instance_list"
] |
a method to retrieve the list of instances on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of strings with instance AWS ids
|
[
"a",
"method",
"to",
"retrieve",
"the",
"list",
"of",
"instances",
"on",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L235-L312
|
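The filter and message construction in list_instances are pure Python and easy to test in isolation; join_words below is a minimal stand-in for labpack.parsing.grammar.join_words:

def join_words(words):
    # minimal stand-in: 'a', 'a and b', 'a, b and c'
    if len(words) < 2:
        return ''.join(words)
    return '%s and %s' % (', '.join(words[:-1]), words[-1])

tag_values = ['web-server', 'staging']  # hypothetical tag values
kw_args = {'Filters': [{'Name': 'tag-value', 'Values': tag_values}]}
plural_value = 's' if len(tag_values) > 1 else ''
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
print('Querying AWS region %s for instances%s.' % ('us-east-1', tag_text))
print(kw_args)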
244,935
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.read_instance
|
def read_instance(self, instance_id):
'''
        a method to retrieve the details of a single instance on AWS EC2
:param instance_id: string of instance id on AWS
:return: dictionary with instance attributes
relevant fields:
'instance_id': '',
'image_id': '',
'instance_type': '',
'region': '',
'state': { 'name': '' },
'key_name': '',
'public_dns_name': '',
'public_ip_address': '',
'tags': [{'key': '', 'value': ''}]
'''
title = '%s.read_instance' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of instance %s.' % (self.iam.region_name, instance_id))
# check instance state
self.iam.printer_on = False
self.check_instance_state(instance_id)
self.iam.printer_on = True
# discover details associated with instance id
try:
response = self.connection.describe_instances(InstanceIds=[ instance_id ])
except:
raise AWSConnectionError(title)
instance_info = response['Reservations'][0]['Instances'][0]
# repeat request if any instances are currently pending
if instance_info['State']['Name'] == 'pending':
self.check_instance_state(instance_info['InstanceId'])
try:
response = self.connection.describe_instances(
InstanceIds=[ instance_id ]
)
except:
raise AWSConnectionError(title)
instance_info = response['Reservations'][0]['Instances'][0]
# create dictionary of instance details
instance_details = {
'instance_id': '',
'image_id': '',
'key_name': '',
'instance_type': '',
'region': self.iam.region_name,
'tags': [],
'public_ip_address': '',
'public_dns_name': '',
'security_groups': [],
'subnet_id': '',
'vpc_id': ''
}
instance_details = self.iam.ingest(instance_info, instance_details)
return instance_details
|
python
|
def read_instance(self, instance_id):
'''
        a method to retrieve the details of a single instance on AWS EC2
:param instance_id: string of instance id on AWS
:return: dictionary with instance attributes
relevant fields:
'instance_id': '',
'image_id': '',
'instance_type': '',
'region': '',
'state': { 'name': '' },
'key_name': '',
'public_dns_name': '',
'public_ip_address': '',
'tags': [{'key': '', 'value': ''}]
'''
title = '%s.read_instance' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of instance %s.' % (self.iam.region_name, instance_id))
# check instance state
self.iam.printer_on = False
self.check_instance_state(instance_id)
self.iam.printer_on = True
# discover details associated with instance id
try:
response = self.connection.describe_instances(InstanceIds=[ instance_id ])
except:
raise AWSConnectionError(title)
instance_info = response['Reservations'][0]['Instances'][0]
# repeat request if any instances are currently pending
if instance_info['State']['Name'] == 'pending':
self.check_instance_state(instance_info['InstanceId'])
try:
response = self.connection.describe_instances(
InstanceIds=[ instance_id ]
)
except:
raise AWSConnectionError(title)
instance_info = response['Reservations'][0]['Instances'][0]
# create dictionary of instance details
instance_details = {
'instance_id': '',
'image_id': '',
'key_name': '',
'instance_type': '',
'region': self.iam.region_name,
'tags': [],
'public_ip_address': '',
'public_dns_name': '',
'security_groups': [],
'subnet_id': '',
'vpc_id': ''
}
instance_details = self.iam.ingest(instance_info, instance_details)
return instance_details
|
[
"def",
"read_instance",
"(",
"self",
",",
"instance_id",
")",
":",
"title",
"=",
"'%s.read_instance'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'instance_id'",
":",
"instance_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# report query",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for properties of instance %s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"instance_id",
")",
")",
"# check instance state",
"self",
".",
"iam",
".",
"printer_on",
"=",
"False",
"self",
".",
"check_instance_state",
"(",
"instance_id",
")",
"self",
".",
"iam",
".",
"printer_on",
"=",
"True",
"# discover details associated with instance id",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instances",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"instance_info",
"=",
"response",
"[",
"'Reservations'",
"]",
"[",
"0",
"]",
"[",
"'Instances'",
"]",
"[",
"0",
"]",
"# repeat request if any instances are currently pending",
"if",
"instance_info",
"[",
"'State'",
"]",
"[",
"'Name'",
"]",
"==",
"'pending'",
":",
"self",
".",
"check_instance_state",
"(",
"instance_info",
"[",
"'InstanceId'",
"]",
")",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_instances",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"instance_info",
"=",
"response",
"[",
"'Reservations'",
"]",
"[",
"0",
"]",
"[",
"'Instances'",
"]",
"[",
"0",
"]",
"# create dictionary of instance details",
"instance_details",
"=",
"{",
"'instance_id'",
":",
"''",
",",
"'image_id'",
":",
"''",
",",
"'key_name'",
":",
"''",
",",
"'instance_type'",
":",
"''",
",",
"'region'",
":",
"self",
".",
"iam",
".",
"region_name",
",",
"'tags'",
":",
"[",
"]",
",",
"'public_ip_address'",
":",
"''",
",",
"'public_dns_name'",
":",
"''",
",",
"'security_groups'",
":",
"[",
"]",
",",
"'subnet_id'",
":",
"''",
",",
"'vpc_id'",
":",
"''",
"}",
"instance_details",
"=",
"self",
".",
"iam",
".",
"ingest",
"(",
"instance_info",
",",
"instance_details",
")",
"return",
"instance_details"
] |
a method to retrieve the details of a single instance on AWS EC2
:param instance_id: string of instance id on AWS
:return: dictionary with instance attributes
relevant fields:
'instance_id': '',
'image_id': '',
'instance_type': '',
'region': '',
'state': { 'name': '' },
'key_name': '',
'public_dns_name': '',
'public_ip_address': '',
'tags': [{'key': '', 'value': ''}]
|
[
"a",
"method",
"to",
"retrieving",
"the",
"details",
"of",
"a",
"single",
"instances",
"on",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L314-L387
|
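A rough stand-in for the iam.ingest step at the end of read_instance: copy only the keys declared in the template, mapping AWS CamelCase field names to snake_case (the real ingest method may behave differently; camel_to_snake and the sample data are illustrative):

import re

def camel_to_snake(name):
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

def ingest(source, template):
    # keep only keys declared in the template, preserving its defaults
    details = dict(template)
    for key, value in source.items():
        snake = camel_to_snake(key)
        if snake in details:
            details[snake] = value
    return details

instance_info = {'InstanceId': 'i-0abc123', 'ImageId': 'ami-456',
                 'InstanceType': 't2.micro', 'Monitoring': {'State': 'disabled'}}
template = {'instance_id': '', 'image_id': '', 'instance_type': '',
            'region': 'us-east-1'}
print(ingest(instance_info, template))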
244,936
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.delete_instance
|
def delete_instance(self, instance_id):
'''
method for removing an instance from AWS EC2
:param instance_id: string of instance id on AWS
:return: string reporting state of instance
'''
title = '%s.delete_instance' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Removing instance %s from AWS region %s.' % (instance_id, self.iam.region_name))
# retrieve state
old_state = self.check_instance_state(instance_id)
# discover tags associated with instance id
tag_list = []
try:
response = self.connection.describe_tags(
Filters=[ { 'Name': 'resource-id', 'Values': [ instance_id ] } ]
)
import re
aws_tag_pattern = re.compile('aws:')
for i in range(0, len(response['Tags'])):
if not aws_tag_pattern.findall(response['Tags'][i]['Key']):
tag = {}
tag['Key'] = response['Tags'][i]['Key']
tag['Value'] = response['Tags'][i]['Value']
tag_list.append(tag)
except:
raise AWSConnectionError(title)
# remove tags from instance
try:
self.connection.delete_tags(
Resources=[ instance_id ],
Tags=tag_list
)
self.iam.printer('Tags have been deleted from %s.' % instance_id)
except:
raise AWSConnectionError(title)
# stop instance
try:
self.connection.stop_instances(
InstanceIds=[ instance_id ]
)
except:
raise AWSConnectionError(title)
# terminate instance
try:
response = self.connection.terminate_instances(
InstanceIds=[ instance_id ]
)
new_state = response['TerminatingInstances'][0]['CurrentState']['Name']
except:
raise AWSConnectionError(title)
        # report outcome and return state
self.iam.printer('Instance %s was %s.' % (instance_id, old_state))
self.iam.printer('Instance %s is %s.' % (instance_id, new_state))
return new_state
|
python
|
def delete_instance(self, instance_id):
'''
method for removing an instance from AWS EC2
:param instance_id: string of instance id on AWS
:return: string reporting state of instance
'''
title = '%s.delete_instance' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Removing instance %s from AWS region %s.' % (instance_id, self.iam.region_name))
# retrieve state
old_state = self.check_instance_state(instance_id)
# discover tags associated with instance id
tag_list = []
try:
response = self.connection.describe_tags(
Filters=[ { 'Name': 'resource-id', 'Values': [ instance_id ] } ]
)
import re
aws_tag_pattern = re.compile('aws:')
for i in range(0, len(response['Tags'])):
if not aws_tag_pattern.findall(response['Tags'][i]['Key']):
tag = {}
tag['Key'] = response['Tags'][i]['Key']
tag['Value'] = response['Tags'][i]['Value']
tag_list.append(tag)
except:
raise AWSConnectionError(title)
# remove tags from instance
try:
self.connection.delete_tags(
Resources=[ instance_id ],
Tags=tag_list
)
self.iam.printer('Tags have been deleted from %s.' % instance_id)
except:
raise AWSConnectionError(title)
# stop instance
try:
self.connection.stop_instances(
InstanceIds=[ instance_id ]
)
except:
raise AWSConnectionError(title)
# terminate instance
try:
response = self.connection.terminate_instances(
InstanceIds=[ instance_id ]
)
new_state = response['TerminatingInstances'][0]['CurrentState']['Name']
except:
raise AWSConnectionError(title)
        # report outcome and return state
self.iam.printer('Instance %s was %s.' % (instance_id, old_state))
self.iam.printer('Instance %s is %s.' % (instance_id, new_state))
return new_state
|
[
"def",
"delete_instance",
"(",
"self",
",",
"instance_id",
")",
":",
"title",
"=",
"'%s.delete_instance'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'instance_id'",
":",
"instance_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# report query",
"self",
".",
"iam",
".",
"printer",
"(",
"'Removing instance %s from AWS region %s.'",
"%",
"(",
"instance_id",
",",
"self",
".",
"iam",
".",
"region_name",
")",
")",
"# retrieve state",
"old_state",
"=",
"self",
".",
"check_instance_state",
"(",
"instance_id",
")",
"# discover tags associated with instance id",
"tag_list",
"=",
"[",
"]",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_tags",
"(",
"Filters",
"=",
"[",
"{",
"'Name'",
":",
"'resource-id'",
",",
"'Values'",
":",
"[",
"instance_id",
"]",
"}",
"]",
")",
"import",
"re",
"aws_tag_pattern",
"=",
"re",
".",
"compile",
"(",
"'aws:'",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"response",
"[",
"'Tags'",
"]",
")",
")",
":",
"if",
"not",
"aws_tag_pattern",
".",
"findall",
"(",
"response",
"[",
"'Tags'",
"]",
"[",
"i",
"]",
"[",
"'Key'",
"]",
")",
":",
"tag",
"=",
"{",
"}",
"tag",
"[",
"'Key'",
"]",
"=",
"response",
"[",
"'Tags'",
"]",
"[",
"i",
"]",
"[",
"'Key'",
"]",
"tag",
"[",
"'Value'",
"]",
"=",
"response",
"[",
"'Tags'",
"]",
"[",
"i",
"]",
"[",
"'Value'",
"]",
"tag_list",
".",
"append",
"(",
"tag",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# remove tags from instance",
"try",
":",
"self",
".",
"connection",
".",
"delete_tags",
"(",
"Resources",
"=",
"[",
"instance_id",
"]",
",",
"Tags",
"=",
"tag_list",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"'Tags have been deleted from %s.'",
"%",
"instance_id",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# stop instance",
"try",
":",
"self",
".",
"connection",
".",
"stop_instances",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# terminate instance",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"terminate_instances",
"(",
"InstanceIds",
"=",
"[",
"instance_id",
"]",
")",
"new_state",
"=",
"response",
"[",
"'TerminatingInstances'",
"]",
"[",
"0",
"]",
"[",
"'CurrentState'",
"]",
"[",
"'Name'",
"]",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# report outcome and return true",
"self",
".",
"iam",
".",
"printer",
"(",
"'Instance %s was %s.'",
"%",
"(",
"instance_id",
",",
"old_state",
")",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"'Instance %s is %s.'",
"%",
"(",
"instance_id",
",",
"new_state",
")",
")",
"return",
"new_state"
] |
method for removing an instance from AWS EC2
:param instance_id: string of instance id on AWS
:return: string reporting state of instance
|
[
"method",
"for",
"removing",
"an",
"instance",
"from",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L613-L685
|
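A standalone sketch of the reserved-tag filter used in delete_instance above; tag keys beginning with 'aws:' are managed by AWS and cannot be removed by delete_tags, which is why they are excluded. The sample tags here are hypothetical:
import re
aws_tag_pattern = re.compile('aws:')
tags = [
    {'Key': 'aws:cloudformation:stack-name', 'Value': 'demo'},  # reserved, must be skipped
    {'Key': 'Name', 'Value': 'test-server'}                     # user tag, safe to delete
]
deletable = [tag for tag in tags if not aws_tag_pattern.findall(tag['Key'])]
print(deletable)  # [{'Key': 'Name', 'Value': 'test-server'}]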
244,937
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.list_images
|
def list_images(self, tag_values=None):
'''
a method to retrieve the list of images of the account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
'''
title = '%s.list_images' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = { 'Owners': [ self.iam.owner_id ] }
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request image details from AWS
self.iam.printer('Querying AWS region %s for images%s.' % (self.iam.region_name, tag_text))
image_list = []
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
# repeat request
if not response_list:
from time import sleep
from timeit import default_timer as timer
self.iam.printer('No images found initially. Checking again', flush=True)
state_timeout = 0
delay = 3
while not response_list and state_timeout < 12:
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
self.iam.printer(' done.')
# collect image ids from the response
for image in response_list:
image_list.append(image['ImageId'])
# report outcome and return results
if image_list:
print_out = 'Found image'
if len(image_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(image_list)
self.iam.printer(print_out)
else:
self.iam.printer('No images found.')
return image_list
|
python
|
def list_images(self, tag_values=None):
'''
a method to retrieve the list of images of the account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
'''
title = '%s.list_images' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = { 'Owners': [ self.iam.owner_id ] }
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request image details from AWS
self.iam.printer('Querying AWS region %s for images%s.' % (self.iam.region_name, tag_text))
image_list = []
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
# repeat request
if not response_list:
from time import sleep
from timeit import default_timer as timer
self.iam.printer('No images found initially. Checking again', flush=True)
state_timeout = 0
delay = 3
while not response_list and state_timeout < 12:
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = self.connection.describe_images(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response['Images']
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
self.iam.printer(' done.')
# collect image ids from the response
for image in response_list:
image_list.append(image['ImageId'])
# report outcome and return results
if image_list:
print_out = 'Found image'
if len(image_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(image_list)
self.iam.printer(print_out)
else:
self.iam.printer('No images found.')
return image_list
|
[
"def",
"list_images",
"(",
"self",
",",
"tag_values",
"=",
"None",
")",
":",
"title",
"=",
"'%s.list_images'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'tag_values'",
":",
"tag_values",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# add tags to method arguments",
"kw_args",
"=",
"{",
"'Owners'",
":",
"[",
"self",
".",
"iam",
".",
"owner_id",
"]",
"}",
"tag_text",
"=",
"''",
"if",
"tag_values",
":",
"kw_args",
"=",
"{",
"'Filters'",
":",
"[",
"{",
"'Name'",
":",
"'tag-value'",
",",
"'Values'",
":",
"tag_values",
"}",
"]",
"}",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"plural_value",
"=",
"''",
"if",
"len",
"(",
"tag_values",
")",
">",
"1",
":",
"plural_value",
"=",
"'s'",
"tag_text",
"=",
"' with tag value%s %s'",
"%",
"(",
"plural_value",
",",
"join_words",
"(",
"tag_values",
")",
")",
"# request image details from AWS",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for images%s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"tag_text",
")",
")",
"image_list",
"=",
"[",
"]",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_images",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"response",
"[",
"'Images'",
"]",
"# repeat request",
"if",
"not",
"response_list",
":",
"from",
"time",
"import",
"sleep",
"from",
"timeit",
"import",
"default_timer",
"as",
"timer",
"self",
".",
"iam",
".",
"printer",
"(",
"'No images found initially. Checking again'",
",",
"flush",
"=",
"True",
")",
"state_timeout",
"=",
"0",
"delay",
"=",
"3",
"while",
"not",
"response_list",
"and",
"state_timeout",
"<",
"12",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'.'",
",",
"flush",
"=",
"True",
")",
"sleep",
"(",
"delay",
")",
"t3",
"=",
"timer",
"(",
")",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_images",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"response",
"[",
"'Images'",
"]",
"t4",
"=",
"timer",
"(",
")",
"state_timeout",
"+=",
"1",
"response_time",
"=",
"t4",
"-",
"t3",
"if",
"3",
"-",
"response_time",
">",
"0",
":",
"delay",
"=",
"3",
"-",
"response_time",
"else",
":",
"delay",
"=",
"0",
"self",
".",
"iam",
".",
"printer",
"(",
"' done.'",
")",
"# wait until all images are no longer pending",
"for",
"image",
"in",
"response_list",
":",
"image_list",
".",
"append",
"(",
"image",
"[",
"'ImageId'",
"]",
")",
"# report outcome and return results",
"if",
"image_list",
":",
"print_out",
"=",
"'Found image'",
"if",
"len",
"(",
"image_list",
")",
">",
"1",
":",
"print_out",
"+=",
"'s'",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"print_out",
"+=",
"' %s.'",
"%",
"join_words",
"(",
"image_list",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"print_out",
")",
"else",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'No images found.'",
")",
"return",
"image_list"
] |
a method to retrieve the list of images of the account on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of image AWS ids
|
[
"a",
"method",
"to",
"retrieve",
"the",
"list",
"of",
"images",
"of",
"account",
"on",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L918-L1000
|
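A minimal sketch of the fixed-cadence retry loop used in list_images above: each attempt subtracts its own request latency from the next sleep so retries stay roughly three seconds apart. The request callable is a hypothetical stand-in for connection.describe_images:
from time import sleep
from timeit import default_timer as timer

def poll(request, attempts=12, cadence=3):
    # retry a possibly-empty query on a fixed cadence, as list_images does
    results = request()
    count = 0
    delay = cadence
    while not results and count < attempts:
        sleep(delay)
        start = timer()
        results = request()
        count += 1
        delay = max(cadence - (timer() - start), 0)
    return results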
244,938
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.read_image
|
def read_image(self, image_id):
'''
a method to retrieve the details of a single image on AWS EC2
:param image_id: string with AWS id of image
:return: dictionary of image attributes
relevant fields:
'image_id': '',
'snapshot_id': '',
'region': '',
'state': '',
'tags': []
'''
title = '%s.read_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_id': image_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of image %s.' % (self.iam.region_name, image_id))
# check image state
self.iam.printer_on = False
self.check_image_state(image_id)
self.iam.printer_on = True
# discover tags and snapshot id associated with image id
try:
response = self.connection.describe_images(ImageIds=[ image_id ])
except:
raise AWSConnectionError(title)
image_info = response['Images'][0]
# construct image details from response
image_details = {
'image_id': '',
'state': '',
'name': '',
'region': self.iam.region_name,
'tags': []
}
image_details = self.iam.ingest(image_info, image_details)
image_details['snapshot_id'] = image_details['block_device_mappings'][0]['ebs']['snapshot_id']
return image_details
|
python
|
def read_image(self, image_id):
'''
a method to retrieve the details of a single image on AWS EC2
:param image_id: string with AWS id of image
:return: dictionary of image attributes
relevant fields:
'image_id': '',
'snapshot_id': '',
'region': '',
'state': '',
'tags': []
'''
title = '%s.read_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_id': image_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of image %s.' % (self.iam.region_name, image_id))
# check image state
self.iam.printer_on = False
self.check_image_state(image_id)
self.iam.printer_on = True
# discover tags and snapshot id associated with image id
try:
response = self.connection.describe_images(ImageIds=[ image_id ])
except:
raise AWSConnectionError(title)
image_info = response['Images'][0]
# construct image details from response
image_details = {
'image_id': '',
'state': '',
'name': '',
'region': self.iam.region_name,
'tags': []
}
image_details = self.iam.ingest(image_info, image_details)
image_details['snapshot_id'] = image_details['block_device_mappings'][0]['ebs']['snapshot_id']
return image_details
|
[
"def",
"read_image",
"(",
"self",
",",
"image_id",
")",
":",
"title",
"=",
"'%s.read_image'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'image_id'",
":",
"image_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# report query",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for properties of image %s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"image_id",
")",
")",
"# check image state",
"self",
".",
"iam",
".",
"printer_on",
"=",
"False",
"self",
".",
"check_image_state",
"(",
"image_id",
")",
"self",
".",
"iam",
".",
"printer_on",
"=",
"True",
"# discover tags and snapshot id associated with image id",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_images",
"(",
"ImageIds",
"=",
"[",
"image_id",
"]",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"image_info",
"=",
"response",
"[",
"'Images'",
"]",
"[",
"0",
"]",
"# construct image details from response",
"image_details",
"=",
"{",
"'image_id'",
":",
"''",
",",
"'state'",
":",
"''",
",",
"'name'",
":",
"''",
",",
"'region'",
":",
"self",
".",
"iam",
".",
"region_name",
",",
"'tags'",
":",
"[",
"]",
"}",
"image_details",
"=",
"self",
".",
"iam",
".",
"ingest",
"(",
"image_info",
",",
"image_details",
")",
"image_details",
"[",
"'snapshot_id'",
"]",
"=",
"image_details",
"[",
"'block_device_mappings'",
"]",
"[",
"0",
"]",
"[",
"'ebs'",
"]",
"[",
"'snapshot_id'",
"]",
"return",
"image_details"
] |
a method to retrieve the details of a single image on AWS EC2
:param image_id: string with AWS id of image
:return: dictionary of image attributes
relevant fields:
'image_id': '',
'snapshot_id': '',
'region': '',
'state': '',
'tags': []
|
[
"a",
"method",
"to",
"retrieve",
"the",
"details",
"of",
"a",
"single",
"image",
"on",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1002-L1055
|
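A hypothetical illustration of the snapshot lookup at the end of read_image above; only the first block device mapping is inspected, so a multi-volume image would report just its root snapshot:
image_details = {  # sample shape after iam.ingest converts keys to snake_case
    'image_id': 'ami-0123456789abcdef0',
    'state': 'available',
    'block_device_mappings': [{'ebs': {'snapshot_id': 'snap-0123456789abcdef0'}}]
}
snapshot_id = image_details['block_device_mappings'][0]['ebs']['snapshot_id']
print(snapshot_id)  # snap-0123456789abcdef0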
244,939
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.delete_image
|
def delete_image(self, image_id):
'''
method for removing an image from AWS EC2
:param image_id: string with AWS id of image
:return: dictionary with AWS response from snapshot delete
'''
title = '%s.delete_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_id': image_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Removing image %s from AWS region %s.' % (image_id, self.iam.region_name))
# retrieve state
old_state = self.check_image_state(image_id)
# discover snapshot id and tags associated with image id
image_details = self.read_image(image_id)
tag_list = image_details['tags']
snapshot_id = image_details['snapshot_id']
# remove tags from image
try:
delete_kwargs = {
'Resources': [ image_id ],
'Tags': self.iam.prepare(tag_list)
}
self.connection.delete_tags(**delete_kwargs)
self.iam.printer('Tags have been deleted from %s.' % image_id)
except:
raise AWSConnectionError(title)
# deregister image
try:
self.connection.deregister_image(
ImageId=image_id
)
except:
raise AWSConnectionError(title)
self.iam.printer('Image %s has been deregistered.' % image_id)
# delete snapshot
try:
response = self.connection.delete_snapshot(
SnapshotId=snapshot_id
)
except:
raise AWSConnectionError(title)
self.iam.printer('Snapshot %s associated with image %s has been deleted.' % (snapshot_id, image_id))
return response
|
python
|
def delete_image(self, image_id):
'''
method for removing an image from AWS EC2
:param image_id: string with AWS id of image
:return: dictionary with AWS response from snapshot delete
'''
title = '%s.delete_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_id': image_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Removing image %s from AWS region %s.' % (image_id, self.iam.region_name))
# retrieve state
old_state = self.check_image_state(image_id)
# discover snapshot id and tags associated with image id
image_details = self.read_image(image_id)
tag_list = image_details['tags']
snapshot_id = image_details['snapshot_id']
# remove tags from image
try:
delete_kwargs = {
'Resources': [ image_id ],
'Tags': self.iam.prepare(tag_list)
}
self.connection.delete_tags(**delete_kwargs)
self.iam.printer('Tags have been deleted from %s.' % image_id)
except:
raise AWSConnectionError(title)
# deregister image
try:
self.connection.deregister_image(
ImageId=image_id
)
except:
raise AWSConnectionError(title)
self.iam.printer('Image %s has been deregistered.' % image_id)
# delete snapshot
try:
response = self.connection.delete_snapshot(
SnapshotId=snapshot_id
)
except:
raise AWSConnectionError(title)
self.iam.printer('Snapshot %s associated with image %s has been deleted.' % (snapshot_id, image_id))
return response
|
[
"def",
"delete_image",
"(",
"self",
",",
"image_id",
")",
":",
"title",
"=",
"'%s.delete_image'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'image_id'",
":",
"image_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# report query",
"self",
".",
"iam",
".",
"printer",
"(",
"'Removing image %s from AWS region %s.'",
"%",
"(",
"image_id",
",",
"self",
".",
"iam",
".",
"region_name",
")",
")",
"# retrieve state",
"old_state",
"=",
"self",
".",
"check_image_state",
"(",
"image_id",
")",
"# discover snapshot id and tags associated with instance id",
"image_details",
"=",
"self",
".",
"read_image",
"(",
"image_id",
")",
"tag_list",
"=",
"image_details",
"[",
"'tags'",
"]",
"snapshot_id",
"=",
"image_details",
"[",
"'snapshot_id'",
"]",
"# remove tags from instance",
"try",
":",
"delete_kwargs",
"=",
"{",
"'Resources'",
":",
"[",
"image_id",
"]",
",",
"'Tags'",
":",
"self",
".",
"iam",
".",
"prepare",
"(",
"tag_list",
")",
"}",
"self",
".",
"connection",
".",
"delete_tags",
"(",
"*",
"*",
"delete_kwargs",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"'Tags have been deleted from %s.'",
"%",
"image_id",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# deregister image",
"try",
":",
"self",
".",
"connection",
".",
"deregister_image",
"(",
"ImageId",
"=",
"image_id",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"'Image %s has been deregistered.'",
"%",
"image_id",
")",
"# delete snapshot",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"delete_snapshot",
"(",
"SnapshotId",
"=",
"snapshot_id",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"'Snapshot %s associated with image %s has been deleted.'",
"%",
"(",
"snapshot_id",
",",
"image_id",
")",
")",
"return",
"response"
] |
method for removing an image from AWS EC2
:param image_id: string with AWS id of image
:return: dictionary with AWS response from snapshot delete
|
[
"method",
"for",
"removing",
"an",
"image",
"from",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1239-L1298
|
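A condensed sketch of the teardown order in delete_image above; the image must be deregistered before its backing snapshot is deleted, since AWS refuses to delete a snapshot that is still referenced by a registered AMI. Here connection stands in for the boto3 EC2 client held by the class:
def teardown_image(connection, image_id, snapshot_id, tag_list):
    # same three-step order as delete_image: tags, registration, snapshot
    connection.delete_tags(Resources=[image_id], Tags=tag_list)
    connection.deregister_image(ImageId=image_id)
    return connection.delete_snapshot(SnapshotId=snapshot_id)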
244,940
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.list_keypairs
|
def list_keypairs(self):
'''
a method to discover the list of key pairs on AWS
:return: list of key pairs
'''
title = '%s.list_keypairs' % self.__class__.__name__
# request key pair list from AWS
self.iam.printer('Querying AWS region %s for key pairs.' % self.iam.region_name)
keypair_list = []
try:
response = self.connection.describe_key_pairs()
except:
raise AWSConnectionError(title)
response_list = []
if 'KeyPairs' in response:
response_list = response['KeyPairs']
# construct list of keypairs from response
for sub_dict in response_list:
keypair_list.append(sub_dict['KeyName'])
# report results and return list
if keypair_list:
print_out = 'Found key pair'
if len(keypair_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(keypair_list)
self.iam.printer(print_out)
else:
self.iam.printer('No key pairs found.')
return keypair_list
|
python
|
def list_keypairs(self):
'''
a method to discover the list of key pairs on AWS
:return: list of key pairs
'''
title = '%s.list_keypairs' % self.__class__.__name__
# request key pair list from AWS
self.iam.printer('Querying AWS region %s for key pairs.' % self.iam.region_name)
keypair_list = []
try:
response = self.connection.describe_key_pairs()
except:
raise AWSConnectionError(title)
response_list = []
if 'KeyPairs' in response:
response_list = response['KeyPairs']
# construct list of keypairs from response
for sub_dict in response_list:
keypair_list.append(sub_dict['KeyName'])
# report results and return list
if keypair_list:
print_out = 'Found key pair'
if len(keypair_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(keypair_list)
self.iam.printer(print_out)
else:
self.iam.printer('No key pairs found.')
return keypair_list
|
[
"def",
"list_keypairs",
"(",
"self",
")",
":",
"title",
"=",
"'%s.list_keypairs'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# request subnet list from AWS",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for key pairs.'",
"%",
"self",
".",
"iam",
".",
"region_name",
")",
"keypair_list",
"=",
"[",
"]",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_key_pairs",
"(",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"[",
"]",
"if",
"'KeyPairs'",
"in",
"response",
":",
"response_list",
"=",
"response",
"[",
"'KeyPairs'",
"]",
"# construct list of keypairs from response",
"for",
"sub_dict",
"in",
"response_list",
":",
"keypair_list",
".",
"append",
"(",
"sub_dict",
"[",
"'KeyName'",
"]",
")",
"# report results and return list",
"if",
"keypair_list",
":",
"print_out",
"=",
"'Found key pair'",
"if",
"len",
"(",
"keypair_list",
")",
">",
"1",
":",
"print_out",
"+=",
"'s'",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"print_out",
"+=",
"' %s.'",
"%",
"join_words",
"(",
"keypair_list",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"print_out",
")",
"else",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'No key pairs found.'",
")",
"return",
"keypair_list"
] |
a method to discover the list of key pairs on AWS
:return: list of key pairs
|
[
"a",
"method",
"to",
"discover",
"the",
"list",
"of",
"key",
"pairs",
"on",
"AWS"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1564-L1600
|
244,941
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.list_subnets
|
def list_subnets(self, tag_values=None):
'''
a method to discover the list of subnets on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of strings with subnet ids
'''
title = '%s.list_subnets' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = {}
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request subnet details from AWS
self.iam.printer('Querying AWS region %s for subnets%s.' % (self.iam.region_name, tag_text))
subnet_list = []
try:
if kw_args:
response = self.connection.describe_subnets(**kw_args)
else:
response = self.connection.describe_subnets()
except:
raise AWSConnectionError(title)
response_list = []
if 'Subnets' in response:
response_list = response['Subnets']
# construct list of subnets from response
for sub_dict in response_list:
subnet_list.append(sub_dict['SubnetId'])
# report results and return list
if subnet_list:
print_out = 'Found subnet'
if len(subnet_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(subnet_list)
self.iam.printer(print_out)
else:
self.iam.printer('No subnets found.')
return subnet_list
|
python
|
def list_subnets(self, tag_values=None):
'''
a method to discover the list of subnets on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of strings with subnet ids
'''
title = '%s.list_subnets' % self.__class__.__name__
# validate inputs
input_fields = {
'tag_values': tag_values
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# add tags to method arguments
kw_args = {}
tag_text = ''
if tag_values:
kw_args = {
'Filters': [ { 'Name': 'tag-value', 'Values': tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value = ''
if len(tag_values) > 1:
plural_value = 's'
tag_text = ' with tag value%s %s' % (plural_value, join_words(tag_values))
# request subnet details from AWS
self.iam.printer('Querying AWS region %s for subnets%s.' % (self.iam.region_name, tag_text))
subnet_list = []
try:
if kw_args:
response = self.connection.describe_subnets(**kw_args)
else:
response = self.connection.describe_subnets()
except:
raise AWSConnectionError(title)
response_list = []
if 'Subnets' in response:
response_list = response['Subnets']
# construct list of subnets from response
for sub_dict in response_list:
subnet_list.append(sub_dict['SubnetId'])
# report results and return list
if subnet_list:
print_out = 'Found subnet'
if len(subnet_list) > 1:
print_out += 's'
from labpack.parsing.grammar import join_words
print_out += ' %s.' % join_words(subnet_list)
self.iam.printer(print_out)
else:
self.iam.printer('No subnets found.')
return subnet_list
|
[
"def",
"list_subnets",
"(",
"self",
",",
"tag_values",
"=",
"None",
")",
":",
"title",
"=",
"'%s.list_subnets'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'tag_values'",
":",
"tag_values",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# add tags to method arguments",
"kw_args",
"=",
"{",
"}",
"tag_text",
"=",
"''",
"if",
"tag_values",
":",
"kw_args",
"=",
"{",
"'Filters'",
":",
"[",
"{",
"'Name'",
":",
"'tag-value'",
",",
"'Values'",
":",
"tag_values",
"}",
"]",
"}",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"plural_value",
"=",
"''",
"if",
"len",
"(",
"tag_values",
")",
">",
"1",
":",
"plural_value",
"=",
"'s'",
"tag_text",
"=",
"' with tag value%s %s'",
"%",
"(",
"plural_value",
",",
"join_words",
"(",
"tag_values",
")",
")",
"# request instance details from AWS",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for subnets%s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"tag_text",
")",
")",
"subnet_list",
"=",
"[",
"]",
"try",
":",
"if",
"kw_args",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_subnets",
"(",
"*",
"*",
"kw_args",
")",
"else",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_subnets",
"(",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"response_list",
"=",
"[",
"]",
"if",
"'Subnets'",
"in",
"response",
":",
"response_list",
"=",
"response",
"[",
"'Subnets'",
"]",
"# construct list of subnets from response",
"for",
"sub_dict",
"in",
"response_list",
":",
"subnet_list",
".",
"append",
"(",
"sub_dict",
"[",
"'SubnetId'",
"]",
")",
"# report results and return list",
"if",
"subnet_list",
":",
"print_out",
"=",
"'Found subnet'",
"if",
"len",
"(",
"subnet_list",
")",
">",
"1",
":",
"print_out",
"+=",
"'s'",
"from",
"labpack",
".",
"parsing",
".",
"grammar",
"import",
"join_words",
"print_out",
"+=",
"' %s.'",
"%",
"join_words",
"(",
"subnet_list",
")",
"self",
".",
"iam",
".",
"printer",
"(",
"print_out",
")",
"else",
":",
"self",
".",
"iam",
".",
"printer",
"(",
"'No subnets found.'",
")",
"return",
"subnet_list"
] |
a method to discover the list of subnets on AWS EC2
:param tag_values: [optional] list of tag values
:return: list of strings with subnet ids
|
[
"a",
"method",
"to",
"discover",
"the",
"list",
"of",
"subnets",
"on",
"AWS",
"EC2"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1602-L1664
|
244,942
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.read_subnet
|
def read_subnet(self, subnet_id):
'''
a method to retrieve the details about a subnet
:param subnet_id: string with AWS id of subnet
:return: dictionary with subnet details
relevant fields:
'subnet_id': '',
'vpc_id': '',
'availability_zone': '',
'state': '',
'tags': [{'key': '', 'value': ''}]
'''
title = '%s.read_subnet' % self.__class__.__name__
# validate inputs
input_fields = {
'subnet_id': subnet_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of subnet %s.' % (self.iam.region_name, subnet_id))
# construct keyword definitions
kw_args = { 'SubnetIds': [ subnet_id ] }
# send request for details about subnet
try:
response = self.connection.describe_subnets(**kw_args)
except:
raise AWSConnectionError(title)
# construct subnet details from response
subnet_dict = response['Subnets'][0]
subnet_details = {
'subnet_id': '',
'vpc_id': '',
'availability_zone': '',
'state': '',
'tags': []
}
subnet_details = self.iam.ingest(subnet_dict, subnet_details)
return subnet_details
|
python
|
def read_subnet(self, subnet_id):
'''
a method to retrieve the details about a subnet
:param subnet_id: string with AWS id of subnet
:return: dictionary with subnet details
relevant fields:
'subnet_id': '',
'vpc_id': '',
'availability_zone': '',
'state': '',
'tags': [{'key': '', 'value': ''}]
'''
title = '%s.read_subnet' % self.__class__.__name__
# validate inputs
input_fields = {
'subnet_id': subnet_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of subnet %s.' % (self.iam.region_name, subnet_id))
# construct keyword definitions
kw_args = { 'SubnetIds': [ subnet_id ] }
# send request for details about subnet
try:
response = self.connection.describe_subnets(**kw_args)
except:
raise AWSConnectionError(title)
# construct subnet details from response
subnet_dict = response['Subnets'][0]
subnet_details = {
'subnet_id': '',
'vpc_id': '',
'availability_zone': '',
'state': '',
'tags': []
}
subnet_details = self.iam.ingest(subnet_dict, subnet_details)
return subnet_details
|
[
"def",
"read_subnet",
"(",
"self",
",",
"subnet_id",
")",
":",
"title",
"=",
"'%s.read_subnet'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'subnet_id'",
":",
"subnet_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# report query",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for properties of subnet %s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"subnet_id",
")",
")",
"# construct keyword definitions",
"kw_args",
"=",
"{",
"'SubnetIds'",
":",
"[",
"subnet_id",
"]",
"}",
"# send request for details about subnet",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_subnets",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# construct subnet details from response",
"subnet_dict",
"=",
"response",
"[",
"'Subnets'",
"]",
"[",
"0",
"]",
"subnet_details",
"=",
"{",
"'subnet_id'",
":",
"''",
",",
"'vpc_id'",
":",
"''",
",",
"'availability_zone'",
":",
"''",
",",
"'state'",
":",
"''",
",",
"'tags'",
":",
"[",
"]",
"}",
"subnet_details",
"=",
"self",
".",
"iam",
".",
"ingest",
"(",
"subnet_dict",
",",
"subnet_details",
")",
"return",
"subnet_details"
] |
a method to retrieve the details about a subnet
:param subnet_id: string with AWS id of subnet
:return: dictionary with subnet details
relevant fields:
'subnet_id': '',
'vpc_id': '',
'availability_zone': '',
'state': '',
'tags': [{'key': '', 'value': ''}]
|
[
"a",
"method",
"to",
"retrieve",
"the",
"details",
"about",
"a",
"subnet"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1666-L1716
|
244,943
|
collectiveacuity/labPack
|
labpack/platforms/aws/ec2.py
|
ec2Client.read_security_group
|
def read_security_group(self, group_id):
'''
a method to retrieve the details about a security group
:param group_id: string with AWS id of security group
:return: dictionary with security group details
relevant fields:
'group_id': '',
'vpc_id': '',
'group_name': '',
'tags': [{'key': '', 'value': ''}],
'ip_permissions': [{
'from_port': 0,
'ip_ranges':[{'cidr_ip':'0.0.0.0/0'}]
}]
'''
title = '%s.read_security_group' % self.__class__.__name__
# validate inputs
input_fields = {
'group_id': group_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of security group %s.' % (self.iam.region_name, group_id))
# construct keyword definitions
kw_args = { 'GroupIds': [ group_id ] }
# send request for details about security group
try:
response = self.connection.describe_security_groups(**kw_args)
except:
raise AWSConnectionError(title)
# construct security group details from response
group_info = response['SecurityGroups'][0]
group_details = {
'group_id': '',
'vpc_id': '',
'group_name': '',
'tags': [],
'ip_permissions': []
}
group_details = self.iam.ingest(group_info, group_details)
return group_details
|
python
|
def read_security_group(self, group_id):
'''
a method to retrieve the details about a security group
:param group_id: string with AWS id of security group
:return: dictionary with security group details
relevant fields:
'group_id': '',
'vpc_id': '',
'group_name': '',
'tags': [{'key': '', 'value': ''}],
'ip_permissions': [{
'from_port': 0,
'ip_ranges':[{'cidr_ip':'0.0.0.0/0'}]
}]
'''
title = '%s.read_security_group' % self.__class__.__name__
# validate inputs
input_fields = {
'group_id': group_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# report query
self.iam.printer('Querying AWS region %s for properties of security group %s.' % (self.iam.region_name, group_id))
# construct keyword definitions
kw_args = { 'GroupIds': [ group_id ] }
# send request for details about security group
try:
response = self.connection.describe_security_groups(**kw_args)
except:
raise AWSConnectionError(title)
# construct security group details from response
group_info = response['SecurityGroups'][0]
group_details = {
'group_id': '',
'vpc_id': '',
'group_name': '',
'tags': [],
'ip_permissions': []
}
group_details = self.iam.ingest(group_info, group_details)
return group_details
|
[
"def",
"read_security_group",
"(",
"self",
",",
"group_id",
")",
":",
"title",
"=",
"'%s.read_security_group'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs",
"input_fields",
"=",
"{",
"'group_id'",
":",
"group_id",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# report query",
"self",
".",
"iam",
".",
"printer",
"(",
"'Querying AWS region %s for properties of security group %s.'",
"%",
"(",
"self",
".",
"iam",
".",
"region_name",
",",
"group_id",
")",
")",
"# construct keyword definitions",
"kw_args",
"=",
"{",
"'GroupIds'",
":",
"[",
"group_id",
"]",
"}",
"# send request for details about security group",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"describe_security_groups",
"(",
"*",
"*",
"kw_args",
")",
"except",
":",
"raise",
"AWSConnectionError",
"(",
"title",
")",
"# construct security group details from response",
"group_info",
"=",
"response",
"[",
"'SecurityGroups'",
"]",
"[",
"0",
"]",
"group_details",
"=",
"{",
"'group_id'",
":",
"''",
",",
"'vpc_id'",
":",
"''",
",",
"'group_name'",
":",
"''",
",",
"'tags'",
":",
"[",
"]",
",",
"'ip_permissions'",
":",
"[",
"]",
"}",
"group_details",
"=",
"self",
".",
"iam",
".",
"ingest",
"(",
"group_info",
",",
"group_details",
")",
"return",
"group_details"
] |
a method to retrieve the details about a security group
:param group_id: string with AWS id of security group
:return: dictionary with security group details
relevant fields:
'group_id': '',
'vpc_id': '',
'group_name': '',
'tags': [{'key': '', 'value': ''}],
'ip_permissions': [{
'from_port': 0,
'ip_ranges':[{'cidr_ip':'0.0.0.0/0'}]
}]
|
[
"a",
"method",
"to",
"retrieve",
"the",
"details",
"about",
"a",
"security",
"group"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1782-L1835
|
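A hypothetical example of consuming read_security_group output, using the snake_case fields listed in its docstring; it flags ports that are open to any address:
group_details = {
    'group_id': 'sg-0123456789abcdef0',
    'ip_permissions': [
        {'from_port': 22, 'ip_ranges': [{'cidr_ip': '0.0.0.0/0'}]},
        {'from_port': 5432, 'ip_ranges': [{'cidr_ip': '10.0.0.0/16'}]}
    ]
}
open_ports = [rule['from_port'] for rule in group_details['ip_permissions']
              if any(r['cidr_ip'] == '0.0.0.0/0' for r in rule['ip_ranges'])]
print(open_ports)  # [22] -- reachable from anywhere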
244,944
|
neumark/microcli
|
microcli.py
|
GlobalOptionParser.print_help
|
def print_help(self, file=None):
""" recursively call all command parsers' helps """
output = file or self.stderr
CustomStderrOptionParser.print_help(self, output)
output.write("\nCommands:\n")
for command_def in self.command_definitions.values():
command_def.opt_parser.print_help(output)
output.write("\n")
|
python
|
def print_help(self, file=None):
""" recursively call all command parsers' helps """
output = file or self.stderr
CustomStderrOptionParser.print_help(self, output)
output.write("\nCommands:\n")
for command_def in self.command_definitions.values():
command_def.opt_parser.print_help(output)
output.write("\n")
|
[
"def",
"print_help",
"(",
"self",
",",
"file",
"=",
"None",
")",
":",
"output",
"=",
"file",
"or",
"self",
".",
"stderr",
"CustomStderrOptionParser",
".",
"print_help",
"(",
"self",
",",
"output",
")",
"output",
".",
"write",
"(",
"\"\\nCommands:\\n\"",
")",
"for",
"command_def",
"in",
"self",
".",
"command_definitions",
".",
"values",
"(",
")",
":",
"command_def",
".",
"opt_parser",
".",
"print_help",
"(",
"output",
")",
"output",
".",
"write",
"(",
"\"\\n\"",
")"
] |
recursively call all command parsers' helps
|
[
"recursively",
"call",
"all",
"command",
"parsers",
"helps"
] |
fa31a35a95f63593ca12d246a5a84e2dff522dd6
|
https://github.com/neumark/microcli/blob/fa31a35a95f63593ca12d246a5a84e2dff522dd6/microcli.py#L151-L158
|
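A minimal sketch of the delegation pattern in GlobalOptionParser.print_help above: the top-level parser prints its own help, then walks each command parser. The parsers here are plain optparse stand-ins rather than the CustomStderrOptionParser subclass:
import optparse
import sys

main_parser = optparse.OptionParser(prog='tool')
command_parsers = [optparse.OptionParser(prog='tool sync'),
                   optparse.OptionParser(prog='tool status')]
main_parser.print_help(sys.stderr)
sys.stderr.write("\nCommands:\n")
for sub_parser in command_parsers:
    sub_parser.print_help(sys.stderr)
    sys.stderr.write("\n")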
244,945
|
theirc/rapidsms-multitenancy
|
multitenancy/auth.py
|
get_user_groups
|
def get_user_groups(user):
"""Return the set of associated TenantGroups for the given user."""
if user.is_active and user.is_authenticated():
if user.is_superuser:
return TenantGroup.objects.all()
else:
return TenantGroup.objects.filter(tenantrole__user=user).distinct()
else:
return TenantGroup.objects.none()
|
python
|
def get_user_groups(user):
"""Return the set of associated TenantGroups for the given user."""
if user.is_active and user.is_authenticated():
if user.is_superuser:
return TenantGroup.objects.all()
else:
return TenantGroup.objects.filter(tenantrole__user=user).distinct()
else:
return TenantGroup.objects.none()
|
[
"def",
"get_user_groups",
"(",
"user",
")",
":",
"if",
"user",
".",
"is_active",
"and",
"user",
".",
"is_authenticated",
"(",
")",
":",
"if",
"user",
".",
"is_superuser",
":",
"return",
"TenantGroup",
".",
"objects",
".",
"all",
"(",
")",
"else",
":",
"return",
"TenantGroup",
".",
"objects",
".",
"filter",
"(",
"tenantrole__user",
"=",
"user",
")",
".",
"distinct",
"(",
")",
"else",
":",
"return",
"TenantGroup",
".",
"objects",
".",
"none",
"(",
")"
] |
Return the set of associated TenantGroups for the given user.
|
[
"Return",
"the",
"set",
"of",
"associated",
"TenantGroups",
"for",
"the",
"given",
"user",
"."
] |
121bd0a628e691a88aade2e10045cba43af2dfcb
|
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/auth.py#L7-L15
|
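A hypothetical view using get_user_groups; note that calling user.is_authenticated() as a method implies a Django version from before 1.10, where it had not yet become a property. The template path below is illustrative:
from django.shortcuts import render

def group_list(request):
    # superusers see every TenantGroup; others see only groups they hold a role in
    groups = get_user_groups(request.user)
    return render(request, 'multitenancy/groups.html', {'groups': groups})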
244,946
|
theirc/rapidsms-multitenancy
|
multitenancy/auth.py
|
get_user_tenants
|
def get_user_tenants(user, group):
"""Return the set of associated Tenants for the given user and group."""
if user.is_active and user.is_authenticated():
if user.is_superuser or is_group_manager(user, group.pk):
return Tenant.objects.filter(group=group)
else:
return Tenant.objects.filter(group=group, tenantrole__user=user).distinct()
else:
return Tenant.objects.none()
|
python
|
def get_user_tenants(user, group):
"""Return the set of associated Tenants for the given user and group."""
if user.is_active and user.is_authenticated():
if user.is_superuser or is_group_manager(user, group.pk):
return Tenant.objects.filter(group=group)
else:
return Tenant.objects.filter(group=group, tenantrole__user=user).distinct()
else:
return Tenant.objects.none()
|
[
"def",
"get_user_tenants",
"(",
"user",
",",
"group",
")",
":",
"if",
"user",
".",
"is_active",
"and",
"user",
".",
"is_authenticated",
"(",
")",
":",
"if",
"user",
".",
"is_superuser",
"or",
"is_group_manager",
"(",
"user",
",",
"group",
".",
"pk",
")",
":",
"return",
"Tenant",
".",
"objects",
".",
"filter",
"(",
"group",
"=",
"group",
")",
"else",
":",
"return",
"Tenant",
".",
"objects",
".",
"filter",
"(",
"group",
"=",
"group",
",",
"tenantrole__user",
"=",
"user",
")",
".",
"distinct",
"(",
")",
"else",
":",
"return",
"Tenant",
".",
"objects",
".",
"none",
"(",
")"
] |
Return the set of associated Tenants for the given user and group.
|
[
"Return",
"the",
"set",
"of",
"associated",
"Tenants",
"for",
"the",
"given",
"user",
"and",
"group",
"."
] |
121bd0a628e691a88aade2e10045cba43af2dfcb
|
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/auth.py#L18-L26
|
244,947
|
theirc/rapidsms-multitenancy
|
multitenancy/auth.py
|
get_user_roles
|
def get_user_roles(user):
"""Return a list of all of the user's roles."""
if not hasattr(user, '_role_cache'):
user._role_cache = list(TenantRole.objects.filter(user=user).values_list(
'group', 'role', 'tenant'))
return user._role_cache
|
python
|
def get_user_roles(user):
"""Return a list of all of the user's roles."""
if not hasattr(user, '_role_cache'):
user._role_cache = list(TenantRole.objects.filter(user=user).values_list(
'group', 'role', 'tenant'))
return user._role_cache
|
[
"def",
"get_user_roles",
"(",
"user",
")",
":",
"if",
"not",
"hasattr",
"(",
"user",
",",
"'_role_cache'",
")",
":",
"user",
".",
"_role_cache",
"=",
"list",
"(",
"TenantRole",
".",
"objects",
".",
"filter",
"(",
"user",
"=",
"user",
")",
".",
"values_list",
"(",
"'group'",
",",
"'role'",
",",
"'tenant'",
")",
")",
"return",
"user",
".",
"_role_cache"
] |
Return a list of all of the user's roles.
|
[
"Return",
"a",
"list",
"of",
"all",
"of",
"the",
"user",
"s",
"roles",
"."
] |
121bd0a628e691a88aade2e10045cba43af2dfcb
|
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/auth.py#L29-L34
|
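A standalone sketch of the per-object memoization used in get_user_roles above: the first call runs the query, and any later call on the same user object reuses the cached list. FakeUser and the role tuple are stand-ins:
class FakeUser(object):
    pass

def get_roles(user):
    if not hasattr(user, '_role_cache'):
        user._role_cache = [(7, 'group-manager', None)]  # stands in for the queryset
    return user._role_cache

user = FakeUser()
assert get_roles(user) is get_roles(user)  # second call is served from the cache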
244,948
|
theirc/rapidsms-multitenancy
|
multitenancy/auth.py
|
is_group_manager
|
def is_group_manager(user, group=None):
"""Returns True if user is a group manager either for the group or any group."""
roles = get_user_roles(user)
return any(x[1] == TenantRole.ROLE_GROUP_MANAGER and (not group or x[0] == group) for x in roles)
|
python
|
def is_group_manager(user, group=None):
"""Returns True if user is a group manager either for the group or any group."""
roles = get_user_roles(user)
return any(x[1] == TenantRole.ROLE_GROUP_MANAGER and (not group or x[0] == group) for x in roles)
|
[
"def",
"is_group_manager",
"(",
"user",
",",
"group",
"=",
"None",
")",
":",
"roles",
"=",
"get_user_roles",
"(",
"user",
")",
"return",
"any",
"(",
"x",
"[",
"1",
"]",
"==",
"TenantRole",
".",
"ROLE_GROUP_MANAGER",
"and",
"(",
"not",
"group",
"or",
"x",
"[",
"0",
"]",
"==",
"group",
")",
"for",
"x",
"in",
"roles",
")"
] |
Returns True if user is a group manager either for the group or any group.
|
[
"Returns",
"True",
"if",
"user",
"is",
"a",
"group",
"manager",
"either",
"for",
"the",
"group",
"or",
"any",
"group",
"."
] |
121bd0a628e691a88aade2e10045cba43af2dfcb
|
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/auth.py#L37-L40
|
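A worked example of the predicate in is_group_manager above, using hypothetical (group, role, tenant) tuples of the shape produced by get_user_roles; ROLE_GROUP_MANAGER stands in for the real TenantRole constant:
ROLE_GROUP_MANAGER = 'group-manager'
roles = [(7, ROLE_GROUP_MANAGER, None), (9, 'member', 42)]

def is_manager(roles, group=None):
    return any(x[1] == ROLE_GROUP_MANAGER and (not group or x[0] == group) for x in roles)

print(is_manager(roles, group=7))   # True: manager of group 7
print(is_manager(roles, group=9))   # False: only a member of group 9
print(is_manager(roles))            # True: manager of at least one group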
244,949
|
nefarioustim/parker
|
parker/consumepage.py
|
get_instance
|
def get_instance(page_to_consume):
"""Return an instance of ConsumePage."""
global _instances
if isinstance(page_to_consume, basestring):
uri = page_to_consume
page_to_consume = page.get_instance(uri)
elif isinstance(page_to_consume, page.Page):
uri = page_to_consume.uri
else:
raise TypeError(
"get_instance() expects a parker.Page or basestring derivative."
)
page_to_consume.fetch()
parsed_page = parser.parse(page_to_consume)
try:
instance = _instances[uri]
except KeyError:
instance = ConsumePage(
parsed_page
)
_instances[uri] = instance
return instance
|
python
|
def get_instance(page_to_consume):
"""Return an instance of ConsumePage."""
global _instances
if isinstance(page_to_consume, basestring):
uri = page_to_consume
page_to_consume = page.get_instance(uri)
elif isinstance(page_to_consume, page.Page):
uri = page_to_consume.uri
else:
raise TypeError(
"get_instance() expects a parker.Page or basestring derivative."
)
page_to_consume.fetch()
parsed_page = parser.parse(page_to_consume)
try:
instance = _instances[uri]
except KeyError:
instance = ConsumePage(
parsed_page
)
_instances[uri] = instance
return instance
|
[
"def",
"get_instance",
"(",
"page_to_consume",
")",
":",
"global",
"_instances",
"if",
"isinstance",
"(",
"page_to_consume",
",",
"basestring",
")",
":",
"uri",
"=",
"page_to_consume",
"page_to_consume",
"=",
"page",
".",
"get_instance",
"(",
"uri",
")",
"elif",
"isinstance",
"(",
"page_to_consume",
",",
"page",
".",
"Page",
")",
":",
"uri",
"=",
"page_to_consume",
".",
"uri",
"else",
":",
"raise",
"TypeError",
"(",
"\"get_instance() expects a parker.Page or basestring derivative.\"",
")",
"page_to_consume",
".",
"fetch",
"(",
")",
"parsed_page",
"=",
"parser",
".",
"parse",
"(",
"page_to_consume",
")",
"try",
":",
"instance",
"=",
"_instances",
"[",
"uri",
"]",
"except",
"KeyError",
":",
"instance",
"=",
"ConsumePage",
"(",
"parsed_page",
")",
"_instances",
"[",
"uri",
"]",
"=",
"instance",
"return",
"instance"
] |
Return an instance of ConsumePage.
|
[
"Return",
"an",
"instance",
"of",
"ConsumePage",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumepage.py#L12-L36
|
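One design observation on get_instance above: the _instances cache is consulted only after fetch() and parser.parse() have run, so a cache hit still pays the network and parsing cost. A hypothetical reordering that checks the cache first, reusing the same module-level names:
def get_instance_cached(uri):
    # assumes the module-level _instances dict and page/parser imports shown above
    if uri in _instances:
        return _instances[uri]
    page_to_consume = page.get_instance(uri)
    page_to_consume.fetch()
    _instances[uri] = ConsumePage(parser.parse(page_to_consume))
    return _instances[uri]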
244,950
|
nefarioustim/parker
|
parker/consumepage.py
|
ConsumePage.get_key_value_dict_by_selectors
|
def get_key_value_dict_by_selectors(
self, key_selector, value_selector, value_sub_selector=None
):
"""Return a dictionary of key value data."""
key_nodes = self.parsedpage.get_nodes_by_selector(key_selector)
keys = [
self.parsedpage.get_text_from_node(node)
for node in key_nodes
]
value_nodes = self.parsedpage.get_nodes_by_selector(value_selector)
if value_sub_selector is not None:
vals = [
[
self.parsedpage.get_text_from_node(subnode)
for subnode in node.find(value_sub_selector)
]
for node in value_nodes.items()
]
else:
vals = [
self.parsedpage.get_text_from_node(node)
for node in value_nodes
]
return dict(zip(keys, vals))
|
python
|
def get_key_value_dict_by_selectors(
self, key_selector, value_selector, value_sub_selector=None
):
"""Return a dictionary of key value data."""
key_nodes = self.parsedpage.get_nodes_by_selector(key_selector)
keys = [
self.parsedpage.get_text_from_node(node)
for node in key_nodes
]
value_nodes = self.parsedpage.get_nodes_by_selector(value_selector)
if value_sub_selector is not None:
vals = [
[
self.parsedpage.get_text_from_node(subnode)
for subnode in node.find(value_sub_selector)
]
for node in value_nodes.items()
]
else:
vals = [
self.parsedpage.get_text_from_node(node)
for node in value_nodes
]
return dict(zip(keys, vals))
|
[
"def",
"get_key_value_dict_by_selectors",
"(",
"self",
",",
"key_selector",
",",
"value_selector",
",",
"value_sub_selector",
"=",
"None",
")",
":",
"key_nodes",
"=",
"self",
".",
"parsedpage",
".",
"get_nodes_by_selector",
"(",
"key_selector",
")",
"keys",
"=",
"[",
"self",
".",
"parsedpage",
".",
"get_text_from_node",
"(",
"node",
")",
"for",
"node",
"in",
"key_nodes",
"]",
"value_nodes",
"=",
"self",
".",
"parsedpage",
".",
"get_nodes_by_selector",
"(",
"value_selector",
")",
"if",
"value_sub_selector",
"is",
"not",
"None",
":",
"vals",
"=",
"[",
"[",
"self",
".",
"parsedpage",
".",
"get_text_from_node",
"(",
"subnode",
")",
"for",
"subnode",
"in",
"node",
".",
"find",
"(",
"value_sub_selector",
")",
"]",
"for",
"node",
"in",
"value_nodes",
".",
"items",
"(",
")",
"]",
"else",
":",
"vals",
"=",
"[",
"self",
".",
"parsedpage",
".",
"get_text_from_node",
"(",
"node",
")",
"for",
"node",
"in",
"value_nodes",
"]",
"return",
"dict",
"(",
"zip",
"(",
"keys",
",",
"vals",
")",
")"
] |
Return a dictionary of key value data.
|
[
"Return",
"a",
"dictionary",
"of",
"key",
"value",
"data",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumepage.py#L53-L78
|
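A worked example of the key/value pairing at the end of get_key_value_dict_by_selectors above; dict(zip(...)) silently drops trailing items when the two selectors match different numbers of nodes, so a mismatched page loses data rather than raising:
keys = ['Brand', 'Colour', 'Weight']
vals = ['Acme', 'Red']
print(dict(zip(keys, vals)))  # {'Brand': 'Acme', 'Colour': 'Red'} -- 'Weight' is dropped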
244,951
|
nefarioustim/parker
|
parker/consumepage.py
|
ConsumePage.get_crumb_list_by_selector
|
def get_crumb_list_by_selector(self, crumb_selector):
"""Return a list of crumbs."""
return [
self.parsedpage.get_text_from_node(crumb)
for crumb in self.parsedpage.get_nodes_by_selector(crumb_selector)
]
|
python
|
def get_crumb_list_by_selector(self, crumb_selector):
"""Return a list of crumbs."""
return [
self.parsedpage.get_text_from_node(crumb)
for crumb in self.parsedpage.get_nodes_by_selector(crumb_selector)
]
|
[
"def",
"get_crumb_list_by_selector",
"(",
"self",
",",
"crumb_selector",
")",
":",
"return",
"[",
"self",
".",
"parsedpage",
".",
"get_text_from_node",
"(",
"crumb",
")",
"for",
"crumb",
"in",
"self",
".",
"parsedpage",
".",
"get_nodes_by_selector",
"(",
"crumb_selector",
")",
"]"
] |
Return a list of crumbs.
|
[
"Return",
"a",
"list",
"of",
"crumbs",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumepage.py#L80-L85
|
244,952
|
nefarioustim/parker
|
parker/consumepage.py
|
ConsumePage.get_media_list_by_selector
|
def get_media_list_by_selector(
self, media_selector, media_attribute="src"
):
"""Return a list of media."""
page_url = urlparse.urlparse(self.uri)
return [
mediafile.get_instance(
urlparse.urljoin(
"%s://%s" % (
page_url.scheme,
page_url.netloc
),
urlparse.urlparse(
media.attrib[media_attribute],
scheme="http"
).geturl()
)
)
for media in self.parsedpage.get_nodes_by_selector(media_selector)
]
|
python
|
def get_media_list_by_selector(
self, media_selector, media_attribute="src"
):
"""Return a list of media."""
page_url = urlparse.urlparse(self.uri)
return [
mediafile.get_instance(
urlparse.urljoin(
"%s://%s" % (
page_url.scheme,
page_url.netloc
),
urlparse.urlparse(
media.attrib[media_attribute],
scheme="http"
).geturl()
)
)
for media in self.parsedpage.get_nodes_by_selector(media_selector)
]
|
[
"def",
"get_media_list_by_selector",
"(",
"self",
",",
"media_selector",
",",
"media_attribute",
"=",
"\"src\"",
")",
":",
"page_url",
"=",
"urlparse",
".",
"urlparse",
"(",
"self",
".",
"uri",
")",
"return",
"[",
"mediafile",
".",
"get_instance",
"(",
"urlparse",
".",
"urljoin",
"(",
"\"%s://%s\"",
"%",
"(",
"page_url",
".",
"scheme",
",",
"page_url",
".",
"netloc",
")",
",",
"urlparse",
".",
"urlparse",
"(",
"media",
".",
"attrib",
"[",
"media_attribute",
"]",
",",
"scheme",
"=",
"\"http\"",
")",
".",
"geturl",
"(",
")",
")",
")",
"for",
"media",
"in",
"self",
".",
"parsedpage",
".",
"get_nodes_by_selector",
"(",
"media_selector",
")",
"]"
] |
Return a list of media.
|
[
"Return",
"a",
"list",
"of",
"media",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumepage.py#L87-L106
|
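A standalone sketch of the URL normalisation in get_media_list_by_selector above: a scheme-relative src is given a default scheme, then resolved against the page's own origin. Python 2's urlparse module is used, matching the code:
import urlparse  # Python 2 module, as used above

page_url = urlparse.urlparse('http://example.com/products/1')
src = '//cdn.example.com/img/1.jpg'  # hypothetical scheme-relative attribute
resolved = urlparse.urljoin(
    '%s://%s' % (page_url.scheme, page_url.netloc),
    urlparse.urlparse(src, scheme='http').geturl()
)
print(resolved)  # http://cdn.example.com/img/1.jpg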
244,953
|
nefarioustim/parker
|
parker/consumepage.py
|
ConsumePage.get_data_dict_from_config
|
def get_data_dict_from_config(self, config_dict):
"""Return a dictionary of data inferred from config_dict."""
return {
key: self.parsedpage.get_filtered_values_by_selector(
item_dict['selector'],
item_dict.get('regex_filter', None),
item_dict.get('regex_group', 1)
)
for key, item_dict in config_dict.iteritems()
if item_dict.get('selector', None) is not None
}
|
python
|
def get_data_dict_from_config(self, config_dict):
"""Return a dictionary of data inferred from config_dict."""
return {
key: self.parsedpage.get_filtered_values_by_selector(
item_dict['selector'],
item_dict.get('regex_filter', None),
item_dict.get('regex_group', 1)
)
for key, item_dict in config_dict.iteritems()
if item_dict.get('selector', None) is not None
}
|
[
"def",
"get_data_dict_from_config",
"(",
"self",
",",
"config_dict",
")",
":",
"return",
"{",
"key",
":",
"self",
".",
"parsedpage",
".",
"get_filtered_values_by_selector",
"(",
"item_dict",
"[",
"'selector'",
"]",
",",
"item_dict",
".",
"get",
"(",
"'regex_filter'",
",",
"None",
")",
",",
"item_dict",
".",
"get",
"(",
"'regex_group'",
",",
"1",
")",
")",
"for",
"key",
",",
"item_dict",
"in",
"config_dict",
".",
"iteritems",
"(",
")",
"if",
"item_dict",
".",
"get",
"(",
"'selector'",
",",
"None",
")",
"is",
"not",
"None",
"}"
] |
Return a dictionary of data inferred from config_dict.
|
[
"Return",
"a",
"dictionary",
"of",
"data",
"inferred",
"from",
"config_dict",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumepage.py#L108-L118
|
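A hypothetical config_dict of the shape consumed by get_data_dict_from_config above; entries without a 'selector' key are skipped, and regex_filter/regex_group fall back to None and 1:
config_dict = {
    'title': {'selector': 'h1.product-title'},
    'price': {'selector': 'span.price', 'regex_filter': r'([\d.]+)', 'regex_group': 1},
    'notes': {}  # no selector, so this key is ignored
}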
244,954
|
edwards-lab/MVtest
|
scripts/mvmany.py
|
generate_jobs
|
def generate_jobs(args, job_list, argument_string):
"""Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
:param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None
"""
mvtest_path = args.mvpath
template = "".join(args.template.readlines())
logpath = os.path.abspath(args.logpath)
respath = os.path.abspath(args.res_path)
scriptpath = os.path.abspath(args.script_path)
pwd = os.path.abspath(os.getcwd())
for jobname in job_list.keys():
filename = "%s/%s.sh" % (scriptpath, jobname)
job_body = mvtest_path + " " + argument_string + " " + job_list[jobname]
contents = Template(template).safe_substitute(
logpath=logpath,
respath=respath,
body=job_body,
jobname=jobname,
memory=args.mem,
walltime=args.walltime,
pwd=pwd)
file = open(filename, "w")
print >> file,contents
|
python
|
def generate_jobs(args, job_list, argument_string):
"""Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
    :param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None
"""
mvtest_path = args.mvpath
template = "".join(args.template.readlines())
logpath = os.path.abspath(args.logpath)
respath = os.path.abspath(args.res_path)
scriptpath = os.path.abspath(args.script_path)
pwd = os.path.abspath(os.getcwd())
for jobname in job_list.keys():
filename = "%s/%s.sh" % (scriptpath, jobname)
job_body = mvtest_path + " " + argument_string + " " + job_list[jobname]
contents = Template(template).safe_substitute(
logpath=logpath,
respath=respath,
body=job_body,
jobname=jobname,
memory=args.mem,
walltime=args.walltime,
pwd=pwd)
file = open(filename, "w")
print >> file,contents
|
[
"def",
"generate_jobs",
"(",
"args",
",",
"job_list",
",",
"argument_string",
")",
":",
"mvtest_path",
"=",
"args",
".",
"mvpath",
"template",
"=",
"\"\"",
".",
"join",
"(",
"args",
".",
"template",
".",
"readlines",
"(",
")",
")",
"logpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"logpath",
")",
"respath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"res_path",
")",
"scriptpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"script_path",
")",
"pwd",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"for",
"jobname",
"in",
"job_list",
".",
"keys",
"(",
")",
":",
"filename",
"=",
"\"%s/%s.sh\"",
"%",
"(",
"scriptpath",
",",
"jobname",
")",
"job_body",
"=",
"mvtest_path",
"+",
"\" \"",
"+",
"argument_string",
"+",
"\" \"",
"+",
"job_list",
"[",
"jobname",
"]",
"contents",
"=",
"Template",
"(",
"template",
")",
".",
"safe_substitute",
"(",
"logpath",
"=",
"logpath",
",",
"respath",
"=",
"respath",
",",
"body",
"=",
"job_body",
",",
"jobname",
"=",
"jobname",
",",
"memory",
"=",
"args",
".",
"mem",
",",
"walltime",
"=",
"args",
".",
"walltime",
",",
"pwd",
"=",
"pwd",
")",
"file",
"=",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"print",
">>",
"file",
",",
"contents"
] |
Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
:param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None
|
[
"Generate",
"actual",
"scripts",
"to",
"be",
"submitted",
"to",
"the",
"cluster"
] |
fe8cf627464ef59d68b7eda628a19840d033882f
|
https://github.com/edwards-lab/MVtest/blob/fe8cf627464ef59d68b7eda628a19840d033882f/scripts/mvmany.py#L45-L73
|
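[Editor's note — generate_jobs] The function writes each script with Python 2's 'print >> file' and never closes the handle explicitly. A hedged Python 3 sketch of just the write step; scriptpath, jobname and contents below are stand-ins for the values the function computes, not values from the record:

    import os

    scriptpath = "/tmp/mvmany_scripts"      # assumed location, for illustration
    jobname = "job0001-example"
    contents = "#!/bin/bash\necho hello\n"
    os.makedirs(scriptpath, exist_ok=True)
    filename = "%s/%s.sh" % (scriptpath, jobname)
    with open(filename, "w") as fh:         # context manager closes the file
        fh.write(contents)
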
244,955
|
edwards-lab/MVtest
|
scripts/mvmany.py
|
get_template_file
|
def get_template_file(args):
"""Returns valid template file, generating the default template file if it doesn't exist and one wasn't
specified on command line.
:param args: Argument collection as generated by parseargs
:return file"""
if args.template is None:
template_filename = os.getenv("HOME") + "/.mvmany.template"
try:
template_filename = open(template_filename, "r")
except:
with open(template_filename, "w") as file:
print >> file, """#SBATCH --job-name=$jobname
#SBATCH --nodes=1
#SBATCH --tasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=$memory
#SBATCH --time=$walltime
#SBATCH --error $logpath/$jobname.e
#SBATCH --output $respath/$jobname.txt
cd $pwd
$body
"""
print >> sys.stderr, """PLEASE NOTE: \n
A default template file, %s, has been created. You are encouraged to configure it to work with your cluster
management software or personalize it with email notifications, etc.\n"""
template_filename = open(template_filename, "r")
return template_filename
|
python
|
def get_template_file(args):
"""Returns valid template file, generating the default template file if it doesn't exist and one wasn't
specified on command line.
:param args: Argument collection as generated by parseargs
:return file"""
if args.template is None:
template_filename = os.getenv("HOME") + "/.mvmany.template"
try:
template_filename = open(template_filename, "r")
except:
with open(template_filename, "w") as file:
print >> file, """#SBATCH --job-name=$jobname
#SBATCH --nodes=1
#SBATCH --tasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=$memory
#SBATCH --time=$walltime
#SBATCH --error $logpath/$jobname.e
#SBATCH --output $respath/$jobname.txt
cd $pwd
$body
"""
print >> sys.stderr, """PLEASE NOTE: \n
A default template file, %s, has been created. You are encouraged to configure it to work with your cluster
management software or personalize it with email notifications, etc.\n"""
template_filename = open(template_filename, "r")
return template_filename
|
[
"def",
"get_template_file",
"(",
"args",
")",
":",
"if",
"args",
".",
"template",
"is",
"None",
":",
"template_filename",
"=",
"os",
".",
"getenv",
"(",
"\"HOME\"",
")",
"+",
"\"/.mvmany.template\"",
"try",
":",
"template_filename",
"=",
"open",
"(",
"template_filename",
",",
"\"r\"",
")",
"except",
":",
"with",
"open",
"(",
"template_filename",
",",
"\"w\"",
")",
"as",
"file",
":",
"print",
">>",
"file",
",",
"\"\"\"#SBATCH --job-name=$jobname\n#SBATCH --nodes=1\n#SBATCH --tasks-per-node=1\n#SBATCH --cpus-per-task=1\n#SBATCH --mem=$memory\n#SBATCH --time=$walltime\n#SBATCH --error $logpath/$jobname.e\n#SBATCH --output $respath/$jobname.txt\n\ncd $pwd\n\n$body\n \"\"\"",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"\"\"PLEASE NOTE: \\n\nA default template file, %s, has been created. You are encouraged to configure it according to work with your cluster\nmanagement software or personalize it with email notifications, etc.\\n\"\"\"",
"template_filename",
"=",
"open",
"(",
"template_filename",
",",
"\"r\"",
")",
"return",
"template_filename"
] |
Returns valid template file, generating the default template file if it doesn't exist and one wasn't
specified on command line.
:param args: Argument collection as generated by parseargs
:return file
|
[
"Returns",
"valid",
"template",
"file",
"generating",
"the",
"default",
"template",
"file",
"if",
"it",
"doesn",
"t",
"exist",
"and",
"one",
"wasn",
"t",
"specified",
"on",
"command",
"line",
"."
] |
fe8cf627464ef59d68b7eda628a19840d033882f
|
https://github.com/edwards-lab/MVtest/blob/fe8cf627464ef59d68b7eda628a19840d033882f/scripts/mvmany.py#L81-L111
|
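[Editor's note — get_template_file] The "PLEASE NOTE" message contains a %s placeholder but the % operator is never applied, so users see a literal '%s'. In that branch template_filename still holds the path string, so interpolating it looks like the intent; a hedged Python 3 sketch of the corrected message, with a stand-in path:

    import sys

    template_path = "~/.mvmany.template"    # stand-in for the computed path
    sys.stderr.write(
        "PLEASE NOTE:\n\n"
        "A default template file, %s, has been created. You are encouraged to "
        "configure it to work with your cluster management software.\n"
        % template_path
    )
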
244,956
|
edwards-lab/MVtest
|
scripts/mvmany.py
|
split_mach_jobs
|
def split_mach_jobs(args, filename):
"""Parse the MACH file and generate the list of jobs.
:param args: Arguments from parseargs
:param filename: name of file containing list of mach dosage files
:return jobs to be run
"""
max_snp_count = args.snps_per_job
job_list = {}
cur = None
last_pos = None
job_string = ""
job_name = ""
mach_count = 1
if args.mach_count:
mach_count = args.mach_count
ExitIf("mvmany doesn't support splitting mach jobs into pieces at this time", max_snp_count > 1)
dosage_files = []
for line in open(filename):
dosage_files.append(line.strip().split("/")[-1].split(".")[0])
dosage_files.append(".".join(line.strip().split()[-1].split("/")[-1].split(".")[0:-1]))
file_count = len(dosage_files)
job_count = int(math.ceil(float(file_count) / mach_count))
for job_num in range(job_count):
job_idx = job_num * mach_count + 1
job_string = "--mach-count %d --mach-offset %d" % (mach_count, job_idx)
job_name = "job%04d-%s" % (job_num+1, dosage_files[job_idx - 1])
job_list[job_name] = job_string
return job_list
|
python
|
def split_mach_jobs(args, filename):
"""Parse the MACH file and generate the list of jobs.
:param args: Arguments from parseargs
:param filename: name of file containing list of mach dosage files
:return jobs to be run
"""
max_snp_count = args.snps_per_job
job_list = {}
cur = None
last_pos = None
job_string = ""
job_name = ""
mach_count = 1
if args.mach_count:
mach_count = args.mach_count
ExitIf("mvmany doesn't support splitting mach jobs into pieces at this time", max_snp_count > 1)
dosage_files = []
for line in open(filename):
dosage_files.append(line.strip().split("/")[-1].split(".")[0])
dosage_files.append(".".join(line.strip().split()[-1].split("/")[-1].split(".")[0:-1]))
file_count = len(dosage_files)
job_count = int(math.ceil(float(file_count) / mach_count))
for job_num in range(job_count):
job_idx = job_num * mach_count + 1
job_string = "--mach-count %d --mach-offset %d" % (mach_count, job_idx)
job_name = "job%04d-%s" % (job_num+1, dosage_files[job_idx - 1])
job_list[job_name] = job_string
return job_list
|
[
"def",
"split_mach_jobs",
"(",
"args",
",",
"filename",
")",
":",
"max_snp_count",
"=",
"args",
".",
"snps_per_job",
"job_list",
"=",
"{",
"}",
"cur",
"=",
"None",
"last_pos",
"=",
"None",
"job_string",
"=",
"\"\"",
"job_name",
"=",
"\"\"",
"mach_count",
"=",
"1",
"if",
"args",
".",
"mach_count",
":",
"mach_count",
"=",
"args",
".",
"mach_count",
"ExitIf",
"(",
"\"mvmany doesn't support splitting mach jobs into pieces at this time\"",
",",
"max_snp_count",
">",
"1",
")",
"dosage_files",
"=",
"[",
"]",
"for",
"line",
"in",
"open",
"(",
"filename",
")",
":",
"dosage_files",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
"dosage_files",
".",
"append",
"(",
"\".\"",
".",
"join",
"(",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
":",
"-",
"1",
"]",
")",
")",
"file_count",
"=",
"len",
"(",
"dosage_files",
")",
"job_count",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"file_count",
")",
"/",
"mach_count",
")",
")",
"for",
"job_num",
"in",
"range",
"(",
"job_count",
")",
":",
"job_idx",
"=",
"job_num",
"*",
"mach_count",
"+",
"1",
"job_string",
"=",
"\"--mach-count %d --mach-offset %d\"",
"%",
"(",
"mach_count",
",",
"job_idx",
")",
"job_name",
"=",
"\"job%04d-%s\"",
"%",
"(",
"job_num",
"+",
"1",
",",
"dosage_files",
"[",
"job_idx",
"-",
"1",
"]",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"return",
"job_list"
] |
Parse the MACH file and generate the list of jobs.
:param args: Arguments from parseargs
:param filename: name of file containing list of mach dosage files
:return jobs to be run
|
[
"Parse",
"the",
"MACH",
"file",
"and",
"generate",
"the",
"list",
"of",
"jobs",
"."
] |
fe8cf627464ef59d68b7eda628a19840d033882f
|
https://github.com/edwards-lab/MVtest/blob/fe8cf627464ef59d68b7eda628a19840d033882f/scripts/mvmany.py#L113-L148
|
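[Editor's note — split_mach_jobs] Both append calls run for every input line, so each dosage file is recorded twice and file_count (hence job_count) is doubled. The second append, which also drops the extension, reads like a revision that was meant to replace the first. A sketch with a single append and an invented two-line input:

    dosage_files = []
    for line in ["/data/chr1.mldose", "/data/chr2.mldose"]:   # stand-in file list
        stem = ".".join(line.strip().split()[-1].split("/")[-1].split(".")[0:-1])
        dosage_files.append(stem)
    print(dosage_files)                                       # ['chr1', 'chr2']
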
244,957
|
edwards-lab/MVtest
|
scripts/mvmany.py
|
split_impute_jobs
|
def split_impute_jobs(args, filename):
"""Parse the IMPUTE file and generate the list of jobs.
:param args: parsearg object containing command line arguments
    :param filename: file containing the IMPUTE gen files and chromosome numbers
"""
max_snp_count = args.snps_per_job
if args.impute_count:
impute_count = args.impute_count
else:
impute_count = 1
ExitIf("mvmany doesn't support splitting IMPUTE jobs into pieces at this time", max_snp_count > 1)
job_list = {}
gen_files = []
for line in open(filename):
gen_files.append(".".join(line.strip().split()[-1].split("/")[-1].split(".")[0:-1]))
file_count = len(gen_files)
job_count = int(math.ceil(float(file_count) / impute_count))
for job_num in range(job_count):
job_idx = job_num * impute_count + 1
job_string = "--impute-offset %d --impute-count %d" % (job_idx, impute_count)
job_name = "job%04d-%s" % (job_num+1, gen_files[job_idx -1])
job_list[job_name] = job_string
print job_string
return job_list
# For now, let's not deal with the complexity of splitting chromosomes in IMPUTE
poscol = 2
cur = None
last_pos = None
job_string = ""
job_name = ""
file_index = 0
for line in open(filename):
chr, genfile = line.strip().split()
if max_snp_count > 0:
locus_index = 0
last_pos = 1
for locus in open(genfile):
if locus_index >= max_snp_count - 1:
rsid, pos = locus.split()[1:2]
job_name = "chr%d_%d" % (chr, last_pos)
job_string = "--chr %s --from-bp %d --to-bp %d" % (chr, last_pos, pos)
last_pos = pos + 1
job_list[job_name] = job_string
locus_index = 0
if cur is None:
cur = pos
for line in sys_call("cut -f 1,%d %s" % (poscol, chrom_file)):
chrom, pos = [int(x) for x in line.split()]
if cur is None: # First line observed
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
elif cur != cur: # Changed chromosome
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
# create new job based on snp count
elif snp_count < max_snp_count:
snp_count += 1
else:
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
job_string = "--chr %d --from-bp" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
last_pos = pos
if job_string != "":
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
return job_list
|
python
|
def split_impute_jobs(args, filename):
"""Parse the IMPUTE file and generate the list of jobs.
:param args: parsearg object containing command line arguments
    :param filename: file containing the IMPUTE gen files and chromosome numbers
"""
max_snp_count = args.snps_per_job
if args.impute_count:
impute_count = args.impute_count
else:
impute_count = 1
ExitIf("mvmany doesn't support splitting IMPUTE jobs into pieces at this time", max_snp_count > 1)
job_list = {}
gen_files = []
for line in open(filename):
gen_files.append(".".join(line.strip().split()[-1].split("/")[-1].split(".")[0:-1]))
file_count = len(gen_files)
job_count = int(math.ceil(float(file_count) / impute_count))
for job_num in range(job_count):
job_idx = job_num * impute_count + 1
job_string = "--impute-offset %d --impute-count %d" % (job_idx, impute_count)
job_name = "job%04d-%s" % (job_num+1, gen_files[job_idx -1])
job_list[job_name] = job_string
print job_string
return job_list
# For now, let's not deal with the complexity of splitting chromosomes in IMPUTE
poscol = 2
cur = None
last_pos = None
job_string = ""
job_name = ""
file_index = 0
for line in open(filename):
chr, genfile = line.strip().split()
if max_snp_count > 0:
locus_index = 0
last_pos = 1
for locus in open(genfile):
if locus_index >= max_snp_count - 1:
rsid, pos = locus.split()[1:2]
job_name = "chr%d_%d" % (chr, last_pos)
job_string = "--chr %s --from-bp %d --to-bp %d" % (chr, last_pos, pos)
last_pos = pos + 1
job_list[job_name] = job_string
locus_index = 0
if cur is None:
cur = pos
for line in sys_call("cut -f 1,%d %s" % (poscol, chrom_file)):
chrom, pos = [int(x) for x in line.split()]
if cur is None: # First line observed
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
elif cur != cur: # Changed chromosome
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
# create new job based on snp count
elif snp_count < max_snp_count:
snp_count += 1
else:
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
job_string = "--chr %d --from-bp" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
last_pos = pos
if job_string != "":
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
return job_list
|
[
"def",
"split_impute_jobs",
"(",
"args",
",",
"filename",
")",
":",
"max_snp_count",
"=",
"args",
".",
"snps_per_job",
"if",
"args",
".",
"impute_count",
":",
"impute_count",
"=",
"args",
".",
"impute_count",
"else",
":",
"impute_count",
"=",
"1",
"ExitIf",
"(",
"\"mvmany doesn't support splitting IMPUTE jobs into pieces at this time\"",
",",
"max_snp_count",
">",
"1",
")",
"job_list",
"=",
"{",
"}",
"gen_files",
"=",
"[",
"]",
"for",
"line",
"in",
"open",
"(",
"filename",
")",
":",
"gen_files",
".",
"append",
"(",
"\".\"",
".",
"join",
"(",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
":",
"-",
"1",
"]",
")",
")",
"file_count",
"=",
"len",
"(",
"gen_files",
")",
"job_count",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"file_count",
")",
"/",
"impute_count",
")",
")",
"for",
"job_num",
"in",
"range",
"(",
"job_count",
")",
":",
"job_idx",
"=",
"job_num",
"*",
"impute_count",
"+",
"1",
"job_string",
"=",
"\"--impute-offset %d --impute-count %d\"",
"%",
"(",
"job_idx",
",",
"impute_count",
")",
"job_name",
"=",
"\"job%04d-%s\"",
"%",
"(",
"job_num",
"+",
"1",
",",
"gen_files",
"[",
"job_idx",
"-",
"1",
"]",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"print",
"job_string",
"return",
"job_list",
"# For now, let's not deal with the complexity of splitting chromosomes in IMPUTE",
"poscol",
"=",
"2",
"cur",
"=",
"None",
"last_pos",
"=",
"None",
"job_string",
"=",
"\"\"",
"job_name",
"=",
"\"\"",
"file_index",
"=",
"0",
"for",
"line",
"in",
"open",
"(",
"filename",
")",
":",
"chr",
",",
"genfile",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"max_snp_count",
">",
"0",
":",
"locus_index",
"=",
"0",
"last_pos",
"=",
"1",
"for",
"locus",
"in",
"open",
"(",
"genfile",
")",
":",
"if",
"locus_index",
">=",
"max_snp_count",
"-",
"1",
":",
"rsid",
",",
"pos",
"=",
"locus",
".",
"split",
"(",
")",
"[",
"1",
":",
"2",
"]",
"job_name",
"=",
"\"chr%d_%d\"",
"%",
"(",
"chr",
",",
"last_pos",
")",
"job_string",
"=",
"\"--chr %s --from-bp %d --to-bp %d\"",
"%",
"(",
"chr",
",",
"last_pos",
",",
"pos",
")",
"last_pos",
"=",
"pos",
"+",
"1",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"locus_index",
"=",
"0",
"if",
"cur",
"is",
"None",
":",
"cur",
"=",
"pos",
"for",
"line",
"in",
"sys_call",
"(",
"\"cut -f 1,%d %s\"",
"%",
"(",
"poscol",
",",
"chrom_file",
")",
")",
":",
"chrom",
",",
"pos",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"line",
".",
"split",
"(",
")",
"]",
"if",
"cur",
"is",
"None",
":",
"# First line observed",
"cur",
"=",
"chrom",
"job_string",
"=",
"\"--chr %d --from-bp %d\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"job_name",
"=",
"\"Chr%d_%d-\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"snp_count",
"=",
"0",
"elif",
"cur",
"!=",
"cur",
":",
"# Changed chromosome",
"job_string",
"+=",
"\" --to-bp %d\"",
"%",
"(",
"last_pos",
")",
"job_name",
"+=",
"str",
"(",
"last_pos",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"cur",
"=",
"chrom",
"job_string",
"=",
"\"--chr %d --from-bp %d\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"job_name",
"=",
"\"Chr%d_%d-\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"snp_count",
"=",
"0",
"# create new job based on snp count",
"elif",
"snp_count",
"<",
"max_snp_count",
":",
"snp_count",
"+=",
"1",
"else",
":",
"job_string",
"+=",
"\" --to-bp %d\"",
"%",
"(",
"last_pos",
")",
"job_name",
"+=",
"str",
"(",
"last_pos",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"job_string",
"=",
"\"--chr %d --from-bp\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"job_name",
"=",
"\"Chr%d_%d-\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"snp_count",
"=",
"0",
"last_pos",
"=",
"pos",
"if",
"job_string",
"!=",
"\"\"",
":",
"job_string",
"+=",
"\" --to-bp %d\"",
"%",
"(",
"last_pos",
")",
"job_name",
"+=",
"str",
"(",
"last_pos",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"return",
"job_list"
] |
Parse the IMPUTE file and generate the list of jobs.
:param args: parsearg object containing command line arguments
:param filename: file containing the IMPUTE gen files and chromosome numbers
|
[
"Parse",
"the",
"IMPUTE",
"file",
"and",
"generate",
"the",
"list",
"of",
"jobs",
"."
] |
fe8cf627464ef59d68b7eda628a19840d033882f
|
https://github.com/edwards-lab/MVtest/blob/fe8cf627464ef59d68b7eda628a19840d033882f/scripts/mvmany.py#L150-L241
|
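[Editor's note — split_impute_jobs] Everything after the first 'return job_list' is unreachable, and the dead block carries latent bugs: 'elif cur != cur:' can never be true (presumably 'cur != chrom' was meant), 'rsid, pos = locus.split()[1:2]' unpacks a one-element slice into two names, and the restart string supplies two values to a single placeholder, which raises TypeError, demonstrated here:

    try:
        "--chr %d --from-bp" % (1, 12345)
    except TypeError as exc:
        print(exc)      # not all arguments converted during string formatting
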
244,958
|
edwards-lab/MVtest
|
scripts/mvmany.py
|
split_chrom_jobs
|
def split_chrom_jobs(args, chrom_file):
"""Split up GWAS jobs based on portions of a chromosome
:param args: arguments from parseargs
:param chrom_file: marker info file
:return dictionary name=>job_details
"""
max_snp_count = args.snps_per_job
poscol = 3
if args.map3:
poscol = 2
job_list = {}
cur = None
last_pos = None
job_string = ""
job_name = ""
for line in sys_call("cut -f 1,%d %s" % (poscol, chrom_file)):
pos = -1
values = line.split()
if len(values) > 0:
chrom, pos = [int(x) for x in values]
if cur is None: # First line observed
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
elif cur != cur: # Changed chromosome
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
# create new job based on snp count
elif snp_count < max_snp_count:
snp_count += 1
else:
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
job_string = "--chr %d --from-bp" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
last_pos = pos
if job_string != "":
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
return job_list
|
python
|
def split_chrom_jobs(args, chrom_file):
"""Split up GWAS jobs based on portions of a chromosome
:param args: arguments from parseargs
:param chrom_file: marker info file
:return dictionary name=>job_details
"""
max_snp_count = args.snps_per_job
poscol = 3
if args.map3:
poscol = 2
job_list = {}
cur = None
last_pos = None
job_string = ""
job_name = ""
for line in sys_call("cut -f 1,%d %s" % (poscol, chrom_file)):
pos = -1
values = line.split()
if len(values) > 0:
chrom, pos = [int(x) for x in values]
if cur is None: # First line observed
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
elif cur != cur: # Changed chromosome
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
cur = chrom
job_string = "--chr %d --from-bp %d" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
# create new job based on snp count
elif snp_count < max_snp_count:
snp_count += 1
else:
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
job_string = "--chr %d --from-bp" % (chrom, pos)
job_name = "Chr%d_%d-" % (chrom, pos)
snp_count = 0
last_pos = pos
if job_string != "":
job_string += " --to-bp %d" % (last_pos)
job_name += str(last_pos)
job_list[job_name] = job_string
return job_list
|
[
"def",
"split_chrom_jobs",
"(",
"args",
",",
"chrom_file",
")",
":",
"max_snp_count",
"=",
"args",
".",
"snps_per_job",
"poscol",
"=",
"3",
"if",
"args",
".",
"map3",
":",
"poscol",
"=",
"2",
"job_list",
"=",
"{",
"}",
"cur",
"=",
"None",
"last_pos",
"=",
"None",
"job_string",
"=",
"\"\"",
"job_name",
"=",
"\"\"",
"for",
"line",
"in",
"sys_call",
"(",
"\"cut -f 1,%d %s\"",
"%",
"(",
"poscol",
",",
"chrom_file",
")",
")",
":",
"pos",
"=",
"-",
"1",
"values",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"values",
")",
">",
"0",
":",
"chrom",
",",
"pos",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"values",
"]",
"if",
"cur",
"is",
"None",
":",
"# First line observed",
"cur",
"=",
"chrom",
"job_string",
"=",
"\"--chr %d --from-bp %d\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"job_name",
"=",
"\"Chr%d_%d-\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"snp_count",
"=",
"0",
"elif",
"cur",
"!=",
"cur",
":",
"# Changed chromosome",
"job_string",
"+=",
"\" --to-bp %d\"",
"%",
"(",
"last_pos",
")",
"job_name",
"+=",
"str",
"(",
"last_pos",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"cur",
"=",
"chrom",
"job_string",
"=",
"\"--chr %d --from-bp %d\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"job_name",
"=",
"\"Chr%d_%d-\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"snp_count",
"=",
"0",
"# create new job based on snp count",
"elif",
"snp_count",
"<",
"max_snp_count",
":",
"snp_count",
"+=",
"1",
"else",
":",
"job_string",
"+=",
"\" --to-bp %d\"",
"%",
"(",
"last_pos",
")",
"job_name",
"+=",
"str",
"(",
"last_pos",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"job_string",
"=",
"\"--chr %d --from-bp\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"job_name",
"=",
"\"Chr%d_%d-\"",
"%",
"(",
"chrom",
",",
"pos",
")",
"snp_count",
"=",
"0",
"last_pos",
"=",
"pos",
"if",
"job_string",
"!=",
"\"\"",
":",
"job_string",
"+=",
"\" --to-bp %d\"",
"%",
"(",
"last_pos",
")",
"job_name",
"+=",
"str",
"(",
"last_pos",
")",
"job_list",
"[",
"job_name",
"]",
"=",
"job_string",
"return",
"job_list"
] |
Split up GWAS jobs based on portions of a chromosome
:param args: arguments from parseargs
:param chrom_file: marker info file
:return dictionary name=>job_details
|
[
"Split",
"up",
"GWAS",
"jobs",
"based",
"on",
"portions",
"of",
"a",
"chromosome"
] |
fe8cf627464ef59d68b7eda628a19840d033882f
|
https://github.com/edwards-lab/MVtest/blob/fe8cf627464ef59d68b7eda628a19840d033882f/scripts/mvmany.py#L243-L300
|
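[Editor's note — split_chrom_jobs] Unlike the dead block in split_impute_jobs, this loop is reachable, so the same two defects matter here: 'elif cur != cur:' is always False (the chromosome-change branch never fires; 'cur != chrom' looks intended), and '"--chr %d --from-bp" % (chrom, pos)' raises TypeError once snp_count exceeds max_snp_count. A hedged sketch of the branching with both presumed fixes applied:

    def next_state(cur, chrom, pos, snp_count, max_snp_count):
        if cur is None or cur != chrom:     # first line, or chromosome changed
            return chrom, "--chr %d --from-bp %d" % (chrom, pos), 0
        if snp_count < max_snp_count:       # stay within the current job window
            return cur, None, snp_count + 1
        return cur, "--chr %d --from-bp %d" % (chrom, pos), 0   # start a new job

    print(next_state(None, 1, 500, 0, 3))   # (1, '--chr 1 --from-bp 500', 0)
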
244,959
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
fluxions/fluxion_node.py
|
Fluxion.diff
|
def diff(self, *args):
"""Call forward_mode; discard value, only keep the derivative."""
arg_dicts = self._parse_args_forward_mode(*args)
val, diff = self._forward_mode(*arg_dicts)
return diff
|
python
|
def diff(self, *args):
"""Call forward_mode; discard value, only keep the derivative."""
arg_dicts = self._parse_args_forward_mode(*args)
val, diff = self._forward_mode(*arg_dicts)
return diff
|
[
"def",
"diff",
"(",
"self",
",",
"*",
"args",
")",
":",
"arg_dicts",
"=",
"self",
".",
"_parse_args_forward_mode",
"(",
"*",
"args",
")",
"val",
",",
"diff",
"=",
"self",
".",
"_forward_mode",
"(",
"*",
"arg_dicts",
")",
"return",
"diff"
] |
Call forward_mode; discard value, only keep the derivative.
|
[
"Call",
"forward_mode",
";",
"discard",
"value",
"only",
"keep",
"the",
"derivative",
"."
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L58-L62
|
244,960
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
fluxions/fluxion_node.py
|
Fluxion._check_forward_mode_input_dict
|
def _check_forward_mode_input_dict(self, var_tbl: dict) -> int:
"""
Check whether one forward mode input dict has elements of valid shape
Returns inferred value of T
"""
T: int = 1
for var_name in var_tbl:
# The bound value to this variable name
val = var_tbl[var_name]
# case 1: this is a scalar; T=1
if isinstance(val, scalar_instance_types):
t = 1
        # case 2: this is an array; calculate T
elif isinstance(val, np.ndarray):
t = self._calc_T_var(val)
#case 3: throw an error
else:
raise ValueError(f'val={val} in var_tbl; {type(val)} not a recognized value type.')
#update T
if t > 1 and T == 1:
T = t
elif t not in (1,T):
raise ValueError(f'Bound variable {var_name} has inconsistent shape')
return T
|
python
|
def _check_forward_mode_input_dict(self, var_tbl: dict) -> int:
"""
Check whether one forward mode input dict has elements of valid shape
Returns inferred value of T
"""
T: int = 1
for var_name in var_tbl:
# The bound value to this variable name
val = var_tbl[var_name]
# case 1: this is a scalar; T=1
if isinstance(val, scalar_instance_types):
t = 1
        # case 2: this is an array; calculate T
elif isinstance(val, np.ndarray):
t = self._calc_T_var(val)
#case 3: throw an error
else:
raise ValueError(f'val={val} in var_tbl; {type(val)} not a recognized value type.')
#update T
if t > 1 and T == 1:
T = t
elif t not in (1,T):
raise ValueError(f'Bound variable {var_name} has inconsistent shape')
return T
|
[
"def",
"_check_forward_mode_input_dict",
"(",
"self",
",",
"var_tbl",
":",
"dict",
")",
"->",
"int",
":",
"T",
":",
"int",
"=",
"1",
"for",
"var_name",
"in",
"var_tbl",
":",
"# The bound value to this variable name",
"val",
"=",
"var_tbl",
"[",
"var_name",
"]",
"# case 1: this is a scalar; T=1",
"if",
"isinstance",
"(",
"val",
",",
"scalar_instance_types",
")",
":",
"t",
"=",
"1",
"# case 2: this is an array; calulate T",
"elif",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
":",
"t",
"=",
"self",
".",
"_calc_T_var",
"(",
"val",
")",
"#case 3: throw an error",
"else",
":",
"raise",
"ValueError",
"(",
"f'val={val} in var_tbl; {type(val)} not a recognized value type.'",
")",
"#update T",
"if",
"t",
">",
"1",
"and",
"T",
"==",
"1",
":",
"T",
"=",
"t",
"elif",
"t",
"not",
"in",
"(",
"1",
",",
"T",
")",
":",
"raise",
"ValueError",
"(",
"f'Bound variable {var_name} has inconsistent shape'",
")",
"return",
"T"
] |
Check whether one forward mode input dict has elements of valid shape
Returns inferred value of T
|
[
"Check",
"whether",
"one",
"forward",
"mode",
"input",
"dict",
"has",
"elements",
"of",
"valid",
"shape",
"Returns",
"inferred",
"value",
"of",
"T"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L250-L273
|
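[Editor's note — Fluxion._check_forward_mode_input_dict] A worked illustration of the rule the method enforces: scalars contribute t = 1, arrays contribute their sample count, and every array must agree on T. The variable names and values are invented:

    import numpy as np

    samples = {"x": np.array([1.0, 2.0, 3.0]), "y": 5.0}
    lengths = [1 if np.isscalar(v) else v.shape[0] for v in samples.values()]
    T = max(lengths)
    assert all(t in (1, T) for t in lengths)   # an array of length 4 would fail
    print(T)                                   # 3
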
244,961
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
fluxions/fluxion_node.py
|
Fluxion._check_forward_mode_input_array
|
def _check_forward_mode_input_array(self, X: np.ndarray) -> int:
"""
Check whether one forward mode input array is of valid shape
Returns inferred value of T
"""
# Find the length of each variable to infer T
if not isinstance(X, np.ndarray):
raise ValueError('X must be a numpy array, dict, or scalar')
# Get the shape and tensor rank
shape = X.shape
tensor_rank = len(shape)
T = 0
# Only 1D and 2D arrays are supported
if tensor_rank not in (0, 1, 2):
raise ValueError(f'Shape of X = {X.shape}. Numpy array must be a 1D vector or 2D matrix')
if tensor_rank == 0:
T = 1
# If the input was a 1D vector, its length must EITHER (1) be T, or (2) m, with T == 1
if tensor_rank == 1 and (shape[0] != self.m) and self.m != 1:
raise ValueError(f'Error: X has shape {X.shape}, incompatible with m = {self.m} on fluxion.')
# Return the value of T in this situation
if tensor_rank == 1 and shape[0] == self.m:
T = 1
if tensor_rank == 1 and self.m == 1:
T = shape[0]
# If the input was a 2D vector, it must be of shape Txn
if tensor_rank == 2 and (shape[1] != self.m):
raise ValueError(f'Error: X has shape {X.shape}, incompatible with m = {self.m} on fluxion.')
if tensor_rank == 2:
T = shape[0]
return T
|
python
|
def _check_forward_mode_input_array(self, X: np.ndarray) -> int:
"""
Check whether one forward mode input array is of valid shape
Returns inferred value of T
"""
# Find the length of each variable to infer T
if not isinstance(X, np.ndarray):
raise ValueError('X must be a numpy array, dict, or scalar')
# Get the shape and tensor rank
shape = X.shape
tensor_rank = len(shape)
T = 0
# Only 1D and 2D arrays are supported
if tensor_rank not in (0, 1, 2):
raise ValueError(f'Shape of X = {X.shape}. Numpy array must be a 1D vector or 2D matrix')
if tensor_rank == 0:
T = 1
# If the input was a 1D vector, its length must EITHER (1) be T, or (2) m, with T == 1
if tensor_rank == 1 and (shape[0] != self.m) and self.m != 1:
raise ValueError(f'Error: X has shape {X.shape}, incompatible with m = {self.m} on fluxion.')
# Return the value of T in this situation
if tensor_rank == 1 and shape[0] == self.m:
T = 1
if tensor_rank == 1 and self.m == 1:
T = shape[0]
# If the input was a 2D vector, it must be of shape Txn
if tensor_rank == 2 and (shape[1] != self.m):
raise ValueError(f'Error: X has shape {X.shape}, incompatible with m = {self.m} on fluxion.')
if tensor_rank == 2:
T = shape[0]
return T
|
[
"def",
"_check_forward_mode_input_array",
"(",
"self",
",",
"X",
":",
"np",
".",
"ndarray",
")",
"->",
"int",
":",
"# Find the length of each variable to infer T",
"if",
"not",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"'X must be a numpy array, dict, or scalar'",
")",
"# Get the shape and tensor rank",
"shape",
"=",
"X",
".",
"shape",
"tensor_rank",
"=",
"len",
"(",
"shape",
")",
"T",
"=",
"0",
"# Only 1D and 2D arrays are supported",
"if",
"tensor_rank",
"not",
"in",
"(",
"0",
",",
"1",
",",
"2",
")",
":",
"raise",
"ValueError",
"(",
"f'Shape of X = {X.shape}. Numpy array must be a 1D vector or 2D matrix'",
")",
"if",
"tensor_rank",
"==",
"0",
":",
"T",
"=",
"1",
"# If the input was a 1D vector, its length must EITHER (1) be T, or (2) m, with T == 1",
"if",
"tensor_rank",
"==",
"1",
"and",
"(",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"m",
")",
"and",
"self",
".",
"m",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"f'Error: X has shape {X.shape}, incompatible with m = {self.m} on fluxion.'",
")",
"# Return the value of T in this situation",
"if",
"tensor_rank",
"==",
"1",
"and",
"shape",
"[",
"0",
"]",
"==",
"self",
".",
"m",
":",
"T",
"=",
"1",
"if",
"tensor_rank",
"==",
"1",
"and",
"self",
".",
"m",
"==",
"1",
":",
"T",
"=",
"shape",
"[",
"0",
"]",
"# If the input was a 2D vector, it must be of shape Txn",
"if",
"tensor_rank",
"==",
"2",
"and",
"(",
"shape",
"[",
"1",
"]",
"!=",
"self",
".",
"m",
")",
":",
"raise",
"ValueError",
"(",
"f'Error: X has shape {X.shape}, incompatible with m = {self.m} on fluxion.'",
")",
"if",
"tensor_rank",
"==",
"2",
":",
"T",
"=",
"shape",
"[",
"0",
"]",
"return",
"T"
] |
Check whether one forward mode input array is of valid shape
Returns inferred value of T
|
[
"Check",
"whether",
"one",
"forward",
"mode",
"input",
"array",
"is",
"of",
"valid",
"shape",
"Returns",
"inferred",
"value",
"of",
"T"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L275-L305
|
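[Editor's note — Fluxion._check_forward_mode_input_array] The shape rules the method encodes, exercised with m = 2 (the comment's 'shape Txn' presumably means T x m, matching the shape[1] != self.m check):

    #   rank 0 (scalar)        -> T = 1
    #   rank 1, length == m    -> one sample of all m variables, T = 1
    #   rank 1, m == 1         -> T samples of the single variable
    #   rank 2, shape (T, m)   -> T = shape[0]
    import numpy as np

    m = 2
    for X in (np.float64(1.0), np.zeros(m), np.zeros((5, m))):
        rank = len(X.shape)
        if rank == 0:
            T = 1
        elif rank == 1:
            T = 1 if X.shape[0] == m else X.shape[0]
        else:
            T = X.shape[0]
        print(X.shape, "->", T)     # () -> 1, (2,) -> 1, (5, 2) -> 5
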
244,962
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
fluxions/fluxion_node.py
|
Fluxion._calc_T_var
|
def _calc_T_var(self,X) -> int:
"""Calculate the number of samples, T, from the shape of X"""
shape = X.shape
tensor_rank: int = len(shape)
if tensor_rank == 0:
return 1
if tensor_rank == 1:
return shape[0]
if tensor_rank == 2:
if shape[1] > 1:
raise ValueError('Initial value of a variable must have dimension T*1.')
return shape[0]
|
python
|
def _calc_T_var(self,X) -> int:
"""Calculate the number of samples, T, from the shape of X"""
shape = X.shape
tensor_rank: int = len(shape)
if tensor_rank == 0:
return 1
if tensor_rank == 1:
return shape[0]
if tensor_rank == 2:
if shape[1] > 1:
raise ValueError('Initial value of a variable must have dimension T*1.')
return shape[0]
|
[
"def",
"_calc_T_var",
"(",
"self",
",",
"X",
")",
"->",
"int",
":",
"shape",
"=",
"X",
".",
"shape",
"tensor_rank",
":",
"int",
"=",
"len",
"(",
"shape",
")",
"if",
"tensor_rank",
"==",
"0",
":",
"return",
"1",
"if",
"tensor_rank",
"==",
"1",
":",
"return",
"shape",
"[",
"0",
"]",
"if",
"tensor_rank",
"==",
"2",
":",
"if",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Initial value of a variable must have dimension T*1.'",
")",
"return",
"shape",
"[",
"0",
"]"
] |
Calculate the number of samples, T, from the shape of X
|
[
"Calculate",
"the",
"number",
"of",
"samples",
"T",
"from",
"the",
"shape",
"of",
"X"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L307-L318
|
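[Editor's note — Fluxion._calc_T_var] The method handles ranks 0-2 and silently returns None for anything higher; an explicit error would surface the unsupported case. A hedged sketch of the same logic with that guard added:

    import numpy as np

    def calc_T(X):
        rank = len(X.shape)
        if rank == 0:
            return 1
        if rank == 1:
            return X.shape[0]
        if rank == 2:
            if X.shape[1] > 1:
                raise ValueError('Initial value of a variable must have dimension T*1.')
            return X.shape[0]
        raise ValueError('rank-%d input not supported' % rank)   # added guard

    print(calc_T(np.zeros((4, 1))))     # 4
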
244,963
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
fluxions/fluxion_node.py
|
Addition._forward_mode
|
def _forward_mode(self, *args):
"""Forward mode differentiation for a sum"""
# (f+g)(x) = f(x) + g(x)
f_val, f_diff = self.f._forward_mode(*args)
g_val, g_diff = self.g._forward_mode(*args)
# The function value and derivative is the sum of f and g
val = f_val + g_val
diff = f_diff + g_diff
return val, diff
|
python
|
def _forward_mode(self, *args):
"""Forward mode differentiation for a sum"""
# (f+g)(x) = f(x) + g(x)
f_val, f_diff = self.f._forward_mode(*args)
g_val, g_diff = self.g._forward_mode(*args)
# The function value and derivative is the sum of f and g
val = f_val + g_val
diff = f_diff + g_diff
return val, diff
|
[
"def",
"_forward_mode",
"(",
"self",
",",
"*",
"args",
")",
":",
"# (f+g)(x) = f(x) + g(x)",
"f_val",
",",
"f_diff",
"=",
"self",
".",
"f",
".",
"_forward_mode",
"(",
"*",
"args",
")",
"g_val",
",",
"g_diff",
"=",
"self",
".",
"g",
".",
"_forward_mode",
"(",
"*",
"args",
")",
"# The function value and derivative is the sum of f and g",
"val",
"=",
"f_val",
"+",
"g_val",
"diff",
"=",
"f_diff",
"+",
"g_diff",
"return",
"val",
",",
"diff"
] |
Forward mode differentiation for a sum
|
[
"Forward",
"mode",
"differentiation",
"for",
"a",
"sum"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L473-L481
|
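[Editor's note — Addition._forward_mode] The node encodes the sum rule, (f + g)' = f' + g'. A worked check with plain floats standing in for Fluxion nodes: f(x) = x**2, g(x) = 3x, evaluated at x = 2.

    x = 2.0
    f_val, f_diff = x ** 2, 2 * x       # f and f'
    g_val, g_diff = 3 * x, 3.0          # g and g'
    val, diff = f_val + g_val, f_diff + g_diff
    print(val, diff)                    # 10.0 7.0
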
244,964
|
CS207-Final-Project-Group-10/cs207-FinalProject
|
fluxions/fluxion_node.py
|
Var._forward_mode
|
def _forward_mode(self, *args):
"""Forward mode differentiation for variables"""
# Parse arguments into two numpy arrays
X: np.ndarray
dX: np.ndarray
X, dX = self._parse_dicts(*args)
# The value is X
if X is not None:
val = X
else:
val = self.X
# The derivative is the seed dX
if dX is not None:
diff = dX
else:
diff = np.ones_like(val)
# Return both arrays
return (val, diff)
|
python
|
def _forward_mode(self, *args):
"""Forward mode differentiation for variables"""
# Parse arguments into two numpy arrays
X: np.ndarray
dX: np.ndarray
X, dX = self._parse_dicts(*args)
# The value is X
if X is not None:
val = X
else:
val = self.X
# The derivative is the seed dX
if dX is not None:
diff = dX
else:
diff = np.ones_like(val)
# Return both arrays
return (val, diff)
|
[
"def",
"_forward_mode",
"(",
"self",
",",
"*",
"args",
")",
":",
"# Parse arguments into two numpy arrays",
"X",
":",
"np",
".",
"ndarray",
"dX",
":",
"np",
".",
"ndarray",
"X",
",",
"dX",
"=",
"self",
".",
"_parse_dicts",
"(",
"*",
"args",
")",
"# The value is X",
"if",
"X",
"is",
"not",
"None",
":",
"val",
"=",
"X",
"else",
":",
"val",
"=",
"self",
".",
"X",
"# The derivative is the seed dX",
"if",
"dX",
"is",
"not",
"None",
":",
"diff",
"=",
"dX",
"else",
":",
"diff",
"=",
"np",
".",
"ones_like",
"(",
"val",
")",
"# Return both arrays",
"return",
"(",
"val",
",",
"diff",
")"
] |
Forward mode differentiation for variables
|
[
"Forward",
"mode",
"differentiation",
"for",
"variables"
] |
842e9c2d3ca1490cef18c086dfde81856d8d3a82
|
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/fluxions/fluxion_node.py#L580-L597
|
244,965
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
run
|
def run():
"""Command for applying upgrades."""
logfilename = os.path.join(current_app.config['CFG_LOGDIR'],
'invenio_upgrader.log')
upgrader = InvenioUpgrader()
logger = upgrader.get_logger(logfilename=logfilename)
try:
upgrades = upgrader.get_upgrades()
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) will be applied:")
for u in upgrades:
logger.info(" * %s (%s)" % (u.name, u.info))
logger.info("Running pre-upgrade checks...")
upgrader.pre_upgrade_checks(upgrades)
logger.info("Calculating estimated upgrade time...")
estimate = upgrader.human_estimate(upgrades)
click.confirm(
"You are going to upgrade your installation "
"(estimated time: {0})!".format(estimate), abort=True)
for u in upgrades:
logger.info("Applying %s (%s)" % (u.name, u.info))
upgrader.apply_upgrade(u)
logger.info("Running post-upgrade checks...")
upgrader.post_upgrade_checks(upgrades)
if upgrader.has_warnings():
logger.warning("Upgrade completed with %s warnings - please check "
"log-file for further information:\nless %s"
% (upgrader.get_warnings_count(), logfilename))
else:
logger.info("Upgrade completed successfully.")
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
logger.info("Please check log file for further information:\n"
"less %s" % logfilename)
click.Abort()
|
python
|
def run():
"""Command for applying upgrades."""
logfilename = os.path.join(current_app.config['CFG_LOGDIR'],
'invenio_upgrader.log')
upgrader = InvenioUpgrader()
logger = upgrader.get_logger(logfilename=logfilename)
try:
upgrades = upgrader.get_upgrades()
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) will be applied:")
for u in upgrades:
logger.info(" * %s (%s)" % (u.name, u.info))
logger.info("Running pre-upgrade checks...")
upgrader.pre_upgrade_checks(upgrades)
logger.info("Calculating estimated upgrade time...")
estimate = upgrader.human_estimate(upgrades)
click.confirm(
"You are going to upgrade your installation "
"(estimated time: {0})!".format(estimate), abort=True)
for u in upgrades:
logger.info("Applying %s (%s)" % (u.name, u.info))
upgrader.apply_upgrade(u)
logger.info("Running post-upgrade checks...")
upgrader.post_upgrade_checks(upgrades)
if upgrader.has_warnings():
logger.warning("Upgrade completed with %s warnings - please check "
"log-file for further information:\nless %s"
% (upgrader.get_warnings_count(), logfilename))
else:
logger.info("Upgrade completed successfully.")
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
logger.info("Please check log file for further information:\n"
"less %s" % logfilename)
click.Abort()
|
[
"def",
"run",
"(",
")",
":",
"logfilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"config",
"[",
"'CFG_LOGDIR'",
"]",
",",
"'invenio_upgrader.log'",
")",
"upgrader",
"=",
"InvenioUpgrader",
"(",
")",
"logger",
"=",
"upgrader",
".",
"get_logger",
"(",
"logfilename",
"=",
"logfilename",
")",
"try",
":",
"upgrades",
"=",
"upgrader",
".",
"get_upgrades",
"(",
")",
"if",
"not",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\"All upgrades have been applied.\"",
")",
"return",
"logger",
".",
"info",
"(",
"\"Following upgrade(s) will be applied:\"",
")",
"for",
"u",
"in",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\" * %s (%s)\"",
"%",
"(",
"u",
".",
"name",
",",
"u",
".",
"info",
")",
")",
"logger",
".",
"info",
"(",
"\"Running pre-upgrade checks...\"",
")",
"upgrader",
".",
"pre_upgrade_checks",
"(",
"upgrades",
")",
"logger",
".",
"info",
"(",
"\"Calculating estimated upgrade time...\"",
")",
"estimate",
"=",
"upgrader",
".",
"human_estimate",
"(",
"upgrades",
")",
"click",
".",
"confirm",
"(",
"\"You are going to upgrade your installation \"",
"\"(estimated time: {0})!\"",
".",
"format",
"(",
"estimate",
")",
",",
"abort",
"=",
"True",
")",
"for",
"u",
"in",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\"Applying %s (%s)\"",
"%",
"(",
"u",
".",
"name",
",",
"u",
".",
"info",
")",
")",
"upgrader",
".",
"apply_upgrade",
"(",
"u",
")",
"logger",
".",
"info",
"(",
"\"Running post-upgrade checks...\"",
")",
"upgrader",
".",
"post_upgrade_checks",
"(",
"upgrades",
")",
"if",
"upgrader",
".",
"has_warnings",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"\"Upgrade completed with %s warnings - please check \"",
"\"log-file for further information:\\nless %s\"",
"%",
"(",
"upgrader",
".",
"get_warnings_count",
"(",
")",
",",
"logfilename",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Upgrade completed successfully.\"",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"for",
"msg",
"in",
"e",
".",
"args",
":",
"logger",
".",
"error",
"(",
"unicode",
"(",
"msg",
")",
")",
"logger",
".",
"info",
"(",
"\"Please check log file for further information:\\n\"",
"\"less %s\"",
"%",
"logfilename",
")",
"click",
".",
"Abort",
"(",
")"
] |
Command for applying upgrades.
|
[
"Command",
"for",
"applying",
"upgrades",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L99-L146
|
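[Editor's note — run] click.Abort is an exception class, so the bare 'click.Abort()' at the end of the error handler builds an instance and discards it; the command then returns normally (exit status 0) despite the failure. 'raise click.Abort()' appears to be the intent, as this stand-in demonstrates:

    class Abort(Exception):             # stand-in for click.Abort
        pass

    def handler(failed):
        if failed:
            Abort()                     # no-op: instance created, never raised
            # raise Abort()             # the presumably intended behaviour

    handler(True)                       # returns normally, proving the no-op
    print("reached")                    # reached
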
244,966
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
check
|
def check():
"""Command for checking upgrades."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
# Run upgrade pre-checks
upgrades = upgrader.get_upgrades()
# Check if there's anything to upgrade
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) have not been applied yet:")
for u in upgrades:
logger.info(
" * {0} {1}".format(u.name, u.info))
logger.info("Running pre-upgrade checks...")
upgrader.pre_upgrade_checks(upgrades)
logger.info("Upgrade check successful - estimated time for upgrading"
" Invenio is %s..." % upgrader.human_estimate(upgrades))
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
logger.error("Upgrade check failed. Aborting.")
raise
|
python
|
def check():
"""Command for checking upgrades."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
# Run upgrade pre-checks
upgrades = upgrader.get_upgrades()
# Check if there's anything to upgrade
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) have not been applied yet:")
for u in upgrades:
logger.info(
" * {0} {1}".format(u.name, u.info))
logger.info("Running pre-upgrade checks...")
upgrader.pre_upgrade_checks(upgrades)
logger.info("Upgrade check successful - estimated time for upgrading"
" Invenio is %s..." % upgrader.human_estimate(upgrades))
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
logger.error("Upgrade check failed. Aborting.")
raise
|
[
"def",
"check",
"(",
")",
":",
"upgrader",
"=",
"InvenioUpgrader",
"(",
")",
"logger",
"=",
"upgrader",
".",
"get_logger",
"(",
")",
"try",
":",
"# Run upgrade pre-checks",
"upgrades",
"=",
"upgrader",
".",
"get_upgrades",
"(",
")",
"# Check if there's anything to upgrade",
"if",
"not",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\"All upgrades have been applied.\"",
")",
"return",
"logger",
".",
"info",
"(",
"\"Following upgrade(s) have not been applied yet:\"",
")",
"for",
"u",
"in",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\" * {0} {1}\"",
".",
"format",
"(",
"u",
".",
"name",
",",
"u",
".",
"info",
")",
")",
"logger",
".",
"info",
"(",
"\"Running pre-upgrade checks...\"",
")",
"upgrader",
".",
"pre_upgrade_checks",
"(",
"upgrades",
")",
"logger",
".",
"info",
"(",
"\"Upgrade check successful - estimated time for upgrading\"",
"\" Invenio is %s...\"",
"%",
"upgrader",
".",
"human_estimate",
"(",
"upgrades",
")",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"for",
"msg",
"in",
"e",
".",
"args",
":",
"logger",
".",
"error",
"(",
"unicode",
"(",
"msg",
")",
")",
"logger",
".",
"error",
"(",
"\"Upgrade check failed. Aborting.\"",
")",
"raise"
] |
Command for checking upgrades.
|
[
"Command",
"for",
"checking",
"upgrades",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L151-L178
|
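[Editor's note — check] Like run() above and the sibling commands pending(), applied(), release() and recipe() below, errors are logged through the Python-2-only builtin unicode(). A version-agnostic sketch of the same pattern (the conditional keeps Python 3 from evaluating the missing name):

    import sys

    text_type = unicode if sys.version_info[0] == 2 else str   # noqa: F821
    def log_args(exc):
        return [text_type(msg) for msg in exc.args]

    print(log_args(RuntimeError("boom", "bang")))              # ['boom', 'bang']
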
244,967
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
pending
|
def pending():
"""Command for showing upgrades ready to be applied."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
upgrades = upgrader.get_upgrades()
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) are ready to be applied:")
for u in upgrades:
logger.info(
" * {0} {1}".format(u.name, u.info))
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
python
|
def pending():
"""Command for showing upgrades ready to be applied."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
upgrades = upgrader.get_upgrades()
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) are ready to be applied:")
for u in upgrades:
logger.info(
" * {0} {1}".format(u.name, u.info))
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
[
"def",
"pending",
"(",
")",
":",
"upgrader",
"=",
"InvenioUpgrader",
"(",
")",
"logger",
"=",
"upgrader",
".",
"get_logger",
"(",
")",
"try",
":",
"upgrades",
"=",
"upgrader",
".",
"get_upgrades",
"(",
")",
"if",
"not",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\"All upgrades have been applied.\"",
")",
"return",
"logger",
".",
"info",
"(",
"\"Following upgrade(s) are ready to be applied:\"",
")",
"for",
"u",
"in",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\" * {0} {1}\"",
".",
"format",
"(",
"u",
".",
"name",
",",
"u",
".",
"info",
")",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"for",
"msg",
"in",
"e",
".",
"args",
":",
"logger",
".",
"error",
"(",
"unicode",
"(",
"msg",
")",
")",
"raise"
] |
Command for showing upgrades ready to be applied.
|
[
"Command",
"for",
"showing",
"upgrades",
"ready",
"to",
"be",
"applied",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L183-L203
|
244,968
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
applied
|
def applied():
"""Command for showing all upgrades already applied."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
upgrades = upgrader.get_history()
if not upgrades:
logger.info("No upgrades have been applied.")
return
logger.info("Following upgrade(s) have been applied:")
for u_id, applied in upgrades:
logger.info(" * %s (%s)" % (u_id, applied))
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
python
|
def applied():
"""Command for showing all upgrades already applied."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
upgrades = upgrader.get_history()
if not upgrades:
logger.info("No upgrades have been applied.")
return
logger.info("Following upgrade(s) have been applied:")
for u_id, applied in upgrades:
logger.info(" * %s (%s)" % (u_id, applied))
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
[
"def",
"applied",
"(",
")",
":",
"upgrader",
"=",
"InvenioUpgrader",
"(",
")",
"logger",
"=",
"upgrader",
".",
"get_logger",
"(",
")",
"try",
":",
"upgrades",
"=",
"upgrader",
".",
"get_history",
"(",
")",
"if",
"not",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\"No upgrades have been applied.\"",
")",
"return",
"logger",
".",
"info",
"(",
"\"Following upgrade(s) have been applied:\"",
")",
"for",
"u_id",
",",
"applied",
"in",
"upgrades",
":",
"logger",
".",
"info",
"(",
"\" * %s (%s)\"",
"%",
"(",
"u_id",
",",
"applied",
")",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"for",
"msg",
"in",
"e",
".",
"args",
":",
"logger",
".",
"error",
"(",
"unicode",
"(",
"msg",
")",
")",
"raise"
] |
Command for showing all upgrades already applied.
|
[
"Command",
"for",
"showing",
"all",
"upgrades",
"already",
"applied",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L208-L227
|
244,969
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
release
|
def release(path, repository):
"""Create a new release upgrade recipe, for developers."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
endpoints = upgrader.find_endpoints()
if not endpoints:
logger.error("No upgrades found.")
click.Abort()
depends_on = []
for repo, upgrades in endpoints.items():
depends_on.extend(upgrades)
return recipe(path,
repository=repository,
depends_on=depends_on,
release=True,
output_path=output_path)
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
python
|
def release(path, repository):
"""Create a new release upgrade recipe, for developers."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
endpoints = upgrader.find_endpoints()
if not endpoints:
logger.error("No upgrades found.")
click.Abort()
depends_on = []
for repo, upgrades in endpoints.items():
depends_on.extend(upgrades)
return recipe(path,
repository=repository,
depends_on=depends_on,
release=True,
output_path=output_path)
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
[
"def",
"release",
"(",
"path",
",",
"repository",
")",
":",
"upgrader",
"=",
"InvenioUpgrader",
"(",
")",
"logger",
"=",
"upgrader",
".",
"get_logger",
"(",
")",
"try",
":",
"endpoints",
"=",
"upgrader",
".",
"find_endpoints",
"(",
")",
"if",
"not",
"endpoints",
":",
"logger",
".",
"error",
"(",
"\"No upgrades found.\"",
")",
"click",
".",
"Abort",
"(",
")",
"depends_on",
"=",
"[",
"]",
"for",
"repo",
",",
"upgrades",
"in",
"endpoints",
".",
"items",
"(",
")",
":",
"depends_on",
".",
"extend",
"(",
"upgrades",
")",
"return",
"recipe",
"(",
"path",
",",
"repository",
"=",
"repository",
",",
"depends_on",
"=",
"depends_on",
",",
"release",
"=",
"True",
",",
"output_path",
"=",
"output_path",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"for",
"msg",
"in",
"e",
".",
"args",
":",
"logger",
".",
"error",
"(",
"unicode",
"(",
"msg",
")",
")",
"raise"
] |
Create a new release upgrade recipe, for developers.
|
[
"Create",
"a",
"new",
"release",
"upgrade",
"recipe",
"for",
"developers",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L234-L259
|
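[Editor's note — release] recipe(...) is called with output_path=output_path, but no parameter or local of that name exists in release(); unless the module defines a global output_path, reaching that call raises NameError, which the RuntimeError-only handler does not catch. A minimal demonstration of the failure mode:

    def release_sketch():
        return output_path              # NameError: name 'output_path' is not defined

    try:
        release_sketch()
    except NameError as exc:
        print(exc)
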
244,970
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
recipe
|
def recipe(package, repository=None, depends_on=None, release=False,
output_path=None, auto=False, overwrite=False, name=None):
"""Create a new upgrade recipe, for developers."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
path, found_repository = _upgrade_recipe_find_path(package)
if output_path:
path = output_path
if not repository:
repository = found_repository
if not os.path.exists(path):
raise RuntimeError("Path does not exists: %s" % path)
if not os.path.isdir(path):
raise RuntimeError("Path is not a directory: %s" % path)
# Generate upgrade filename
if release:
filename = "%s_release_x_y_z.py" % repository
else:
filename = "%s_%s_%s.py" % (repository,
date.today().strftime("%Y_%m_%d"),
name or 'rename_me')
# Check if generated repository name can be parsed
test_repository = upgrader._parse_plugin_id(filename[:-3])
if repository != test_repository:
raise RuntimeError(
"Generated repository name cannot be parsed. "
"Please override it with --repository option."
)
upgrade_file = os.path.join(path, filename)
if os.path.exists(upgrade_file) and not overwrite:
raise RuntimeError(
"Could not generate upgrade - %s already exists."
% upgrade_file
)
# Determine latest installed upgrade
if depends_on is None:
depends_on = ["CHANGE_ME"]
u = upgrader.latest_applied_upgrade(repository=repository)
if u:
depends_on = [u]
# Write upgrade template file
_write_template(
upgrade_file, name or 'rename_me',
depends_on, repository, auto=auto)
logger.info("Created new upgrade %s" % upgrade_file)
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
python
|
def recipe(package, repository=None, depends_on=None, release=False,
output_path=None, auto=False, overwrite=False, name=None):
"""Create a new upgrade recipe, for developers."""
upgrader = InvenioUpgrader()
logger = upgrader.get_logger()
try:
path, found_repository = _upgrade_recipe_find_path(package)
if output_path:
path = output_path
if not repository:
repository = found_repository
if not os.path.exists(path):
raise RuntimeError("Path does not exists: %s" % path)
if not os.path.isdir(path):
raise RuntimeError("Path is not a directory: %s" % path)
# Generate upgrade filename
if release:
filename = "%s_release_x_y_z.py" % repository
else:
filename = "%s_%s_%s.py" % (repository,
date.today().strftime("%Y_%m_%d"),
name or 'rename_me')
# Check if generated repository name can be parsed
test_repository = upgrader._parse_plugin_id(filename[:-3])
if repository != test_repository:
raise RuntimeError(
"Generated repository name cannot be parsed. "
"Please override it with --repository option."
)
upgrade_file = os.path.join(path, filename)
if os.path.exists(upgrade_file) and not overwrite:
raise RuntimeError(
"Could not generate upgrade - %s already exists."
% upgrade_file
)
# Determine latest installed upgrade
if depends_on is None:
depends_on = ["CHANGE_ME"]
u = upgrader.latest_applied_upgrade(repository=repository)
if u:
depends_on = [u]
# Write upgrade template file
_write_template(
upgrade_file, name or 'rename_me',
depends_on, repository, auto=auto)
logger.info("Created new upgrade %s" % upgrade_file)
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
raise
|
[
"def",
"recipe",
"(",
"package",
",",
"repository",
"=",
"None",
",",
"depends_on",
"=",
"None",
",",
"release",
"=",
"False",
",",
"output_path",
"=",
"None",
",",
"auto",
"=",
"False",
",",
"overwrite",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"upgrader",
"=",
"InvenioUpgrader",
"(",
")",
"logger",
"=",
"upgrader",
".",
"get_logger",
"(",
")",
"try",
":",
"path",
",",
"found_repository",
"=",
"_upgrade_recipe_find_path",
"(",
"package",
")",
"if",
"output_path",
":",
"path",
"=",
"output_path",
"if",
"not",
"repository",
":",
"repository",
"=",
"found_repository",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Path does not exists: %s\"",
"%",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Path is not a directory: %s\"",
"%",
"path",
")",
"# Generate upgrade filename",
"if",
"release",
":",
"filename",
"=",
"\"%s_release_x_y_z.py\"",
"%",
"repository",
"else",
":",
"filename",
"=",
"\"%s_%s_%s.py\"",
"%",
"(",
"repository",
",",
"date",
".",
"today",
"(",
")",
".",
"strftime",
"(",
"\"%Y_%m_%d\"",
")",
",",
"name",
"or",
"'rename_me'",
")",
"# Check if generated repository name can be parsed",
"test_repository",
"=",
"upgrader",
".",
"_parse_plugin_id",
"(",
"filename",
"[",
":",
"-",
"3",
"]",
")",
"if",
"repository",
"!=",
"test_repository",
":",
"raise",
"RuntimeError",
"(",
"\"Generated repository name cannot be parsed. \"",
"\"Please override it with --repository option.\"",
")",
"upgrade_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"upgrade_file",
")",
"and",
"not",
"overwrite",
":",
"raise",
"RuntimeError",
"(",
"\"Could not generate upgrade - %s already exists.\"",
"%",
"upgrade_file",
")",
"# Determine latest installed upgrade",
"if",
"depends_on",
"is",
"None",
":",
"depends_on",
"=",
"[",
"\"CHANGE_ME\"",
"]",
"u",
"=",
"upgrader",
".",
"latest_applied_upgrade",
"(",
"repository",
"=",
"repository",
")",
"if",
"u",
":",
"depends_on",
"=",
"[",
"u",
"]",
"# Write upgrade template file",
"_write_template",
"(",
"upgrade_file",
",",
"name",
"or",
"'rename_me'",
",",
"depends_on",
",",
"repository",
",",
"auto",
"=",
"auto",
")",
"logger",
".",
"info",
"(",
"\"Created new upgrade %s\"",
"%",
"upgrade_file",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"for",
"msg",
"in",
"e",
".",
"args",
":",
"logger",
".",
"error",
"(",
"unicode",
"(",
"msg",
")",
")",
"raise"
] |
Create a new upgrade recipe, for developers.
|
[
"Create",
"a",
"new",
"upgrade",
"recipe",
"for",
"developers",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L274-L335
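A note on the naming scheme in the recipe record above: release recipes get a literal "x_y_z" version placeholder, while ordinary recipes embed the current date. A minimal sketch of the same filename logic, runnable in isolation (the repository name here is only an illustration, not taken from the record):

from datetime import date

def upgrade_filename(repository, release=False, name=None):
    # Mirrors the filename branch in recipe() above.
    if release:
        return "%s_release_x_y_z.py" % repository
    return "%s_%s_%s.py" % (repository,
                            date.today().strftime("%Y_%m_%d"),
                            name or 'rename_me')

print(upgrade_filename("invenio_upgrader"))
# e.g. invenio_upgrader_2015_06_01_rename_me.py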
|
244,971
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
_write_template
|
def _write_template(upgrade_file, name, depends_on, repository, auto=False):
"""Write template to upgrade file."""
if auto:
# Ensure all models are loaded
from invenio_db import models
list(models)
template_args = produce_upgrade_operations()
operations_str = template_args['upgrades']
import_str = template_args['imports']
else:
operations_str = " pass"
import_str = ""
with open(upgrade_file, 'w') as f:
f.write(UPGRADE_TEMPLATE % {
'depends_on': depends_on,
'repository': repository,
'year': date.today().year,
'operations': operations_str,
'imports': import_str,
'cls': ''.join(w.capitalize() or '_' for w in name.split('_'))
})
|
python
|
def _write_template(upgrade_file, name, depends_on, repository, auto=False):
"""Write template to upgrade file."""
if auto:
# Ensure all models are loaded
from invenio_db import models
list(models)
template_args = produce_upgrade_operations()
operations_str = template_args['upgrades']
import_str = template_args['imports']
else:
operations_str = " pass"
import_str = ""
with open(upgrade_file, 'w') as f:
f.write(UPGRADE_TEMPLATE % {
'depends_on': depends_on,
'repository': repository,
'year': date.today().year,
'operations': operations_str,
'imports': import_str,
'cls': ''.join(w.capitalize() or '_' for w in name.split('_'))
})
|
[
"def",
"_write_template",
"(",
"upgrade_file",
",",
"name",
",",
"depends_on",
",",
"repository",
",",
"auto",
"=",
"False",
")",
":",
"if",
"auto",
":",
"# Ensure all models are loaded",
"from",
"invenio_db",
"import",
"models",
"list",
"(",
"models",
")",
"template_args",
"=",
"produce_upgrade_operations",
"(",
")",
"operations_str",
"=",
"template_args",
"[",
"'upgrades'",
"]",
"import_str",
"=",
"template_args",
"[",
"'imports'",
"]",
"else",
":",
"operations_str",
"=",
"\" pass\"",
"import_str",
"=",
"\"\"",
"with",
"open",
"(",
"upgrade_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"UPGRADE_TEMPLATE",
"%",
"{",
"'depends_on'",
":",
"depends_on",
",",
"'repository'",
":",
"repository",
",",
"'year'",
":",
"date",
".",
"today",
"(",
")",
".",
"year",
",",
"'operations'",
":",
"operations_str",
",",
"'imports'",
":",
"import_str",
",",
"'cls'",
":",
"''",
".",
"join",
"(",
"w",
".",
"capitalize",
"(",
")",
"or",
"'_'",
"for",
"w",
"in",
"name",
".",
"split",
"(",
"'_'",
")",
")",
"}",
")"
] |
Write template to upgrade file.
|
[
"Write",
"template",
"to",
"upgrade",
"file",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L341-L362
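The 'cls' entry in _write_template above turns a snake_case recipe name into a CamelCase class name; the "or '_'" keeps consecutive underscores from collapsing, since ''.capitalize() is falsy. A quick standalone check:

for name in ('rename_me', 'foo__bar'):
    cls = ''.join(w.capitalize() or '_' for w in name.split('_'))
    print(name, '->', cls)
# rename_me -> RenameMe
# foo__bar -> Foo_Bar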
|
244,972
|
inveniosoftware-attic/invenio-upgrader
|
invenio_upgrader/cli.py
|
_upgrade_recipe_find_path
|
def _upgrade_recipe_find_path(import_str, create=True):
"""Determine repository name and path for new upgrade.
It is based on package import path.
"""
try:
# Import package
m = import_string(import_str)
# Check if package or module
if m.__package__ is not None and m.__package__ != m.__name__:
raise RuntimeError(
"Expected package but found module at '%s'." % import_str
)
# Create upgrade directory if it does not exist
path = os.path.join(os.path.dirname(m.__file__), "upgrades")
if not os.path.exists(path) and create:
os.makedirs(path)
# Create init file if it does not exist
init = os.path.join(path, "__init__.py")
if not os.path.exists(init) and create:
open(init, 'a').close()
repository = m.__name__.split(".")[-1]
return (path, repository)
except ImportError:
raise RuntimeError("Could not find module '%s'." % import_str)
except SyntaxError:
raise RuntimeError("Module '%s' has syntax errors." % import_str)
|
python
|
def _upgrade_recipe_find_path(import_str, create=True):
"""Determine repository name and path for new upgrade.
It is based on package import path.
"""
try:
# Import package
m = import_string(import_str)
# Check if package or module
if m.__package__ is not None and m.__package__ != m.__name__:
raise RuntimeError(
"Expected package but found module at '%s'." % import_str
)
# Create upgrade directory if it does not exist
path = os.path.join(os.path.dirname(m.__file__), "upgrades")
if not os.path.exists(path) and create:
os.makedirs(path)
# Create init file if it does not exist
init = os.path.join(path, "__init__.py")
if not os.path.exists(init) and create:
open(init, 'a').close()
repository = m.__name__.split(".")[-1]
return (path, repository)
except ImportError:
raise RuntimeError("Could not find module '%s'." % import_str)
except SyntaxError:
raise RuntimeError("Module '%s' has syntax errors." % import_str)
|
[
"def",
"_upgrade_recipe_find_path",
"(",
"import_str",
",",
"create",
"=",
"True",
")",
":",
"try",
":",
"# Import package",
"m",
"=",
"import_string",
"(",
"import_str",
")",
"# Check if package or module",
"if",
"m",
".",
"__package__",
"is",
"not",
"None",
"and",
"m",
".",
"__package__",
"!=",
"m",
".",
"__name__",
":",
"raise",
"RuntimeError",
"(",
"\"Expected package but found module at '%s'.\"",
"%",
"import_str",
")",
"# Create upgrade directory if it does not exists",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"m",
".",
"__file__",
")",
",",
"\"upgrades\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"and",
"create",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"# Create init file if it does not exists",
"init",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"__init__.py\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"init",
")",
"and",
"create",
":",
"open",
"(",
"init",
",",
"'a'",
")",
".",
"close",
"(",
")",
"repository",
"=",
"m",
".",
"__name__",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"return",
"(",
"path",
",",
"repository",
")",
"except",
"ImportError",
":",
"raise",
"RuntimeError",
"(",
"\"Could not find module '%s'.\"",
"%",
"import_str",
")",
"except",
"SyntaxError",
":",
"raise",
"RuntimeError",
"(",
"\"Module '%s' has syntax errors.\"",
"%",
"import_str",
")"
] |
Determine repository name and path for new upgrade.
It is based on package import path.
|
[
"Determine",
"repository",
"name",
"and",
"path",
"for",
"new",
"upgrade",
"."
] |
cee4bcb118515463ecf6de1421642007f79a9fcd
|
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/cli.py#L365-L396
|
244,973
|
narfman0/helga-youtube-metadata
|
helga_youtube_meta/plugin.py
|
youtube_meta
|
def youtube_meta(client, channel, nick, message, match):
""" Return meta information about a video """
if not API_KEY:
return 'You must set YOUTUBE_DATA_API_KEY in settings!'
identifier = match[0]
params = {
'id': identifier,
'key': API_KEY,
'part': 'snippet,statistics,contentDetails',
}
response = requests.get(API_ROOT, params=params)
if response.status_code != 200:
return 'Error in response, ' + str(response.status_code) + ' for identifier: ' + identifier
try:
data = response.json()['items'][0]
except:
print('Exception requesting info for identifier: ' + identifier)
traceback.print_exc()
return None
response_dict = {
'title': data['snippet']['title'],
'poster': data['snippet']['channelTitle'],
'date': str(parse_date(data['snippet']['publishedAt'])),
'views': data['statistics']['viewCount'],
'likes': data['statistics']['likeCount'],
'dislikes': data['statistics']['dislikeCount'],
'duration': parse_duration(data['contentDetails']['duration']),
}
return RESPONSE_TEMPLATE.format(**response_dict).encode('utf-8').strip()
|
python
|
def youtube_meta(client, channel, nick, message, match):
""" Return meta information about a video """
if not API_KEY:
return 'You must set YOUTUBE_DATA_API_KEY in settings!'
identifier = match[0]
params = {
'id': identifier,
'key': API_KEY,
'part': 'snippet,statistics,contentDetails',
}
response = requests.get(API_ROOT, params=params)
if response.status_code != 200:
return 'Error in response, ' + str(response.status_code) + ' for identifier: ' + identifier
try:
data = response.json()['items'][0]
except:
print('Exception requesting info for identifier: ' + identifier)
traceback.print_exc()
return None
response_dict = {
'title': data['snippet']['title'],
'poster': data['snippet']['channelTitle'],
'date': str(parse_date(data['snippet']['publishedAt'])),
'views': data['statistics']['viewCount'],
'likes': data['statistics']['likeCount'],
'dislikes': data['statistics']['dislikeCount'],
'duration': parse_duration(data['contentDetails']['duration']),
}
return RESPONSE_TEMPLATE.format(**response_dict).encode('utf-8').strip()
|
[
"def",
"youtube_meta",
"(",
"client",
",",
"channel",
",",
"nick",
",",
"message",
",",
"match",
")",
":",
"if",
"not",
"API_KEY",
":",
"return",
"'You must set YOUTUBE_DATA_API_KEY in settings!'",
"identifier",
"=",
"match",
"[",
"0",
"]",
"params",
"=",
"{",
"'id'",
":",
"identifier",
",",
"'key'",
":",
"API_KEY",
",",
"'part'",
":",
"'snippet,statistics,contentDetails'",
",",
"}",
"response",
"=",
"requests",
".",
"get",
"(",
"API_ROOT",
",",
"params",
"=",
"params",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"return",
"'Error in response, '",
"+",
"str",
"(",
"response",
".",
"status_code",
")",
"+",
"' for identifier: '",
"+",
"identifier",
"try",
":",
"data",
"=",
"response",
".",
"json",
"(",
")",
"[",
"'items'",
"]",
"[",
"0",
"]",
"except",
":",
"print",
"(",
"'Exception requesting info for identifier: '",
"+",
"identifier",
")",
"traceback",
".",
"print_exc",
"(",
")",
"response_dict",
"=",
"{",
"'title'",
":",
"data",
"[",
"'snippet'",
"]",
"[",
"'title'",
"]",
",",
"'poster'",
":",
"data",
"[",
"'snippet'",
"]",
"[",
"'channelTitle'",
"]",
",",
"'date'",
":",
"str",
"(",
"parse_date",
"(",
"data",
"[",
"'snippet'",
"]",
"[",
"'publishedAt'",
"]",
")",
")",
",",
"'views'",
":",
"data",
"[",
"'statistics'",
"]",
"[",
"'viewCount'",
"]",
",",
"'likes'",
":",
"data",
"[",
"'statistics'",
"]",
"[",
"'likeCount'",
"]",
",",
"'dislikes'",
":",
"data",
"[",
"'statistics'",
"]",
"[",
"'dislikeCount'",
"]",
",",
"'duration'",
":",
"parse_duration",
"(",
"data",
"[",
"'contentDetails'",
"]",
"[",
"'duration'",
"]",
")",
",",
"}",
"return",
"RESPONSE_TEMPLATE",
".",
"format",
"(",
"*",
"*",
"response_dict",
")",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")"
] |
Return meta information about a video
|
[
"Return",
"meta",
"information",
"about",
"a",
"video"
] |
1babc2e6404864a344cb173e8a03fa5de957059a
|
https://github.com/narfman0/helga-youtube-metadata/blob/1babc2e6404864a344cb173e8a03fa5de957059a/helga_youtube_meta/plugin.py#L26-L56
|
244,974
|
narfman0/helga-youtube-metadata
|
helga_youtube_meta/plugin.py
|
parse_duration
|
def parse_duration(duration):
""" Parse and prettify duration from youtube duration format """
duration_dict = re.search(DURATION_REGEX, duration).groupdict()
converted_dict = {}
# convert all values to ints, remove nones
for a, x in duration_dict.iteritems():
if x is not None:
converted_dict[a] = int(NON_DECIMAL.sub('', x))
return str(timedelta(**converted_dict))
|
python
|
def parse_duration(duration):
""" Parse and prettify duration from youtube duration format """
duration_dict = re.search(DURATION_REGEX, duration).groupdict()
converted_dict = {}
# convert all values to ints, remove nones
for a, x in duration_dict.iteritems():
if x is not None:
converted_dict[a] = int(NON_DECIMAL.sub('', x))
return str(timedelta(**converted_dict))
|
[
"def",
"parse_duration",
"(",
"duration",
")",
":",
"duration_dict",
"=",
"re",
".",
"search",
"(",
"DURATION_REGEX",
",",
"duration",
")",
".",
"groupdict",
"(",
")",
"converted_dict",
"=",
"{",
"}",
"# convert all values to ints, remove nones",
"for",
"a",
",",
"x",
"in",
"duration_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"x",
"is",
"not",
"None",
":",
"converted_dict",
"[",
"a",
"]",
"=",
"int",
"(",
"NON_DECIMAL",
".",
"sub",
"(",
"''",
",",
"x",
")",
")",
"return",
"str",
"(",
"timedelta",
"(",
"*",
"*",
"converted_dict",
")",
")"
] |
Parse and prettify duration from youtube duration format
|
[
"Parse",
"and",
"prettify",
"duration",
"from",
"youtube",
"duration",
"format"
] |
1babc2e6404864a344cb173e8a03fa5de957059a
|
https://github.com/narfman0/helga-youtube-metadata/blob/1babc2e6404864a344cb173e8a03fa5de957059a/helga_youtube_meta/plugin.py#L59-L67
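DURATION_REGEX and NON_DECIMAL are defined elsewhere in the plugin and are not part of this record, and the .iteritems() call is Python 2. A self-contained Python 3 sketch, under the assumption that the regex captures the hour/minute/second components of an ISO-8601 duration such as 'PT1H2M10S':

import re
from datetime import timedelta

DURATION_REGEX = r'PT(?P<hours>\d+H)?(?P<minutes>\d+M)?(?P<seconds>\d+S)?'  # assumed pattern
NON_DECIMAL = re.compile(r'\D+')

def parse_duration(duration):
    groups = re.search(DURATION_REGEX, duration).groupdict()
    # Convert captured groups like '1H' to ints, dropping missing components.
    converted = {k: int(NON_DECIMAL.sub('', v))
                 for k, v in groups.items() if v is not None}
    return str(timedelta(**converted))

print(parse_duration('PT1H2M10S'))  # 1:02:10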
|
244,975
|
uw-it-aca/uw-restclients-mailman
|
uw_mailman/course_list.py
|
exists_course_list
|
def exists_course_list(curriculum_abbr, course_number, section_id,
quarter, year, joint=False):
"""
Return True if the corresponding mailman list exists for the course
"""
return exists(get_course_list_name(curriculum_abbr, course_number,
section_id, quarter, year, joint))
|
python
|
def exists_course_list(curriculum_abbr, course_number, section_id,
quarter, year, joint=False):
"""
Return True if the corresponding mailman list exists for the course
"""
return exists(get_course_list_name(curriculum_abbr, course_number,
section_id, quarter, year, joint))
|
[
"def",
"exists_course_list",
"(",
"curriculum_abbr",
",",
"course_number",
",",
"section_id",
",",
"quarter",
",",
"year",
",",
"joint",
"=",
"False",
")",
":",
"return",
"exists",
"(",
"get_course_list_name",
"(",
"curriculum_abbr",
",",
"course_number",
",",
"section_id",
",",
"quarter",
",",
"year",
",",
"joint",
")",
")"
] |
Return True if the corresponding mailman list exists for the course
|
[
"Return",
"True",
"if",
"the",
"corresponding",
"mailman",
"list",
"exists",
"for",
"the",
"course"
] |
ef077f2cc945871422fcd66391e82264e2384b2c
|
https://github.com/uw-it-aca/uw-restclients-mailman/blob/ef077f2cc945871422fcd66391e82264e2384b2c/uw_mailman/course_list.py#L32-L38
|
244,976
|
ludeeus/pylaunches
|
pylaunches/api.py
|
Launches.get_launches
|
async def get_launches(self):
"""Get launch information."""
from .common import CommonFunctions
common = CommonFunctions(self.loop, self.session)
all_launches = []
launches = {}
data = await common.api_call(BASE_URL)
if data is None:
LOGGER.error('Error getting launch information')
return
for launch in data['launches']:
lid = launch['id']
launches[lid] = {}
try:
launches[lid]['start'] = await common.iso(launch['wsstamp'])
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['start'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['wsstamp'] = launch['wsstamp']
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['wsstamp'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['name'] = launch['name']
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['name'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['agency'] = (launch['missions'][0]['agencies']
[0]['name'])
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['agency'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['agency_country_code'] = (launch['missions'][0]
['agencies'][0]
['countryCode'])
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['agency_country_code'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['stream'] = launch['vidURLs'][0]
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['stream'] = None
LOGGER.debug('Error getting launch information, %s', error)
all_launches.append(launches[lid])
self._launches = await common.sort_data(all_launches, 'start')
|
python
|
async def get_launches(self):
"""Get launch information."""
from .common import CommonFunctions
common = CommonFunctions(self.loop, self.session)
all_launches = []
launches = {}
data = await common.api_call(BASE_URL)
if data is None:
LOGGER.error('Error getting launch information')
return
for launch in data['launches']:
lid = launch['id']
launches[lid] = {}
try:
launches[lid]['start'] = await common.iso(launch['wsstamp'])
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['start'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['wsstamp'] = launch['wsstamp']
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['wsstamp'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['name'] = launch['name']
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['name'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['agency'] = (launch['missions'][0]['agencies']
[0]['name'])
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['agency'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['agency_country_code'] = (launch['missions'][0]
['agencies'][0]
['countryCode'])
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['agency_country_code'] = None
LOGGER.debug('Error getting launch information, %s', error)
try:
launches[lid]['stream'] = launch['vidURLs'][0]
except (LaunchesError, IndexError, KeyError, TypeError) as error:
launches[lid]['stream'] = None
LOGGER.debug('Error getting launch information, %s', error)
all_launches.append(launches[lid])
self._launches = await common.sort_data(all_launches, 'start')
|
[
"async",
"def",
"get_launches",
"(",
"self",
")",
":",
"from",
".",
"common",
"import",
"CommonFunctions",
"common",
"=",
"CommonFunctions",
"(",
"self",
".",
"loop",
",",
"self",
".",
"session",
")",
"all_launches",
"=",
"[",
"]",
"launches",
"=",
"{",
"}",
"data",
"=",
"await",
"common",
".",
"api_call",
"(",
"BASE_URL",
")",
"if",
"data",
"is",
"None",
":",
"LOGGER",
".",
"error",
"(",
"'Error getting launch information'",
")",
"return",
"for",
"launch",
"in",
"data",
"[",
"'launches'",
"]",
":",
"lid",
"=",
"launch",
"[",
"'id'",
"]",
"launches",
"[",
"lid",
"]",
"=",
"{",
"}",
"try",
":",
"launches",
"[",
"lid",
"]",
"[",
"'start'",
"]",
"=",
"await",
"common",
".",
"iso",
"(",
"launch",
"[",
"'wsstamp'",
"]",
")",
"except",
"(",
"LaunchesError",
",",
"IndexError",
",",
"KeyError",
",",
"TypeError",
")",
"as",
"error",
":",
"launches",
"[",
"lid",
"]",
"[",
"'start'",
"]",
"=",
"None",
"LOGGER",
".",
"debug",
"(",
"'Error getting launch information, %s'",
",",
"error",
")",
"try",
":",
"launches",
"[",
"lid",
"]",
"[",
"'wsstamp'",
"]",
"=",
"launch",
"[",
"'wsstamp'",
"]",
"except",
"(",
"LaunchesError",
",",
"IndexError",
",",
"KeyError",
",",
"TypeError",
")",
"as",
"error",
":",
"launches",
"[",
"lid",
"]",
"[",
"'wsstamp'",
"]",
"=",
"None",
"LOGGER",
".",
"debug",
"(",
"'Error getting launch information, %s'",
",",
"error",
")",
"try",
":",
"launches",
"[",
"lid",
"]",
"[",
"'name'",
"]",
"=",
"launch",
"[",
"'name'",
"]",
"except",
"(",
"LaunchesError",
",",
"IndexError",
",",
"KeyError",
",",
"TypeError",
")",
"as",
"error",
":",
"launches",
"[",
"lid",
"]",
"[",
"'name'",
"]",
"=",
"None",
"LOGGER",
".",
"debug",
"(",
"'Error getting launch information, %s'",
",",
"error",
")",
"try",
":",
"launches",
"[",
"lid",
"]",
"[",
"'agency'",
"]",
"=",
"(",
"launch",
"[",
"'missions'",
"]",
"[",
"0",
"]",
"[",
"'agencies'",
"]",
"[",
"0",
"]",
"[",
"'name'",
"]",
")",
"except",
"(",
"LaunchesError",
",",
"IndexError",
",",
"KeyError",
",",
"TypeError",
")",
"as",
"error",
":",
"launches",
"[",
"lid",
"]",
"[",
"'agency'",
"]",
"=",
"None",
"LOGGER",
".",
"debug",
"(",
"'Error getting launch information, %s'",
",",
"error",
")",
"try",
":",
"launches",
"[",
"lid",
"]",
"[",
"'agency_country_code'",
"]",
"=",
"(",
"launch",
"[",
"'missions'",
"]",
"[",
"0",
"]",
"[",
"'agencies'",
"]",
"[",
"0",
"]",
"[",
"'countryCode'",
"]",
")",
"except",
"(",
"LaunchesError",
",",
"IndexError",
",",
"KeyError",
",",
"TypeError",
")",
"as",
"error",
":",
"launches",
"[",
"lid",
"]",
"[",
"'agency_country_code'",
"]",
"=",
"None",
"LOGGER",
".",
"debug",
"(",
"'Error getting launch information, %s'",
",",
"error",
")",
"try",
":",
"launches",
"[",
"lid",
"]",
"[",
"'stream'",
"]",
"=",
"launch",
"[",
"'vidURLs'",
"]",
"[",
"0",
"]",
"except",
"(",
"LaunchesError",
",",
"IndexError",
",",
"KeyError",
",",
"TypeError",
")",
"as",
"error",
":",
"launches",
"[",
"lid",
"]",
"[",
"'stream'",
"]",
"=",
"None",
"LOGGER",
".",
"debug",
"(",
"'Error getting launch information, %s'",
",",
"error",
")",
"all_launches",
".",
"append",
"(",
"launches",
"[",
"lid",
"]",
")",
"self",
".",
"_launches",
"=",
"await",
"common",
".",
"sort_data",
"(",
"all_launches",
",",
"'start'",
")"
] |
Get launch information.
|
[
"Get",
"launch",
"information",
"."
] |
6cc449a9f734cbf789e561564b500a5dca93fe82
|
https://github.com/ludeeus/pylaunches/blob/6cc449a9f734cbf789e561564b500a5dca93fe82/pylaunches/api.py#L20-L67
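Each field in get_launches above is wrapped in an identical try/except so one missing key does not discard the whole launch. The same effect can be had with a small helper; this is a sketch of the pattern, not part of pylaunches itself:

def safe_get(data, *keys, default=None):
    # Walk nested dicts/lists, returning `default` on any missing
    # key or index instead of raising.
    try:
        for key in keys:
            data = data[key]
        return data
    except (IndexError, KeyError, TypeError):
        return default

launch = {'missions': [{'agencies': [{'name': 'Agency X'}]}]}
print(safe_get(launch, 'missions', 0, 'agencies', 0, 'name'))  # Agency X
print(safe_get(launch, 'vidURLs', 0))                          # None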
|
244,977
|
dossier/dossier.models
|
dossier/models/openquery/google.py
|
Google.web_search
|
def web_search(self, query, start=0, limit=100, max_tries=3):
'''
encapsulates urllib retrieval for fetching JSON results from
Google's Custom Search API. Returns a deserialized result set.
'''
tries = 0
if isinstance(query, unicode):
query = query.encode('utf8')
url = self.url % dict(key=self.api_key,
query=urllib.quote(query.strip()),
num=min(10, limit - start),
start=start)
logger.info("fetching: %s" % url)
while 1:
try:
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
# Why do we set this? Remnant from pre-API version?
request.add_header('User-Agent', USER_AGENT)
opener = urllib2.build_opener()
fh = opener.open(request, timeout=60)
data = fh.read()
if fh.headers.get('Content-Encoding') == 'gzip':
compressedstream = StringIO.StringIO(data)
fh = gzip.GzipFile(fileobj=compressedstream)
data = fh.read()
return json.loads(data)
except Exception, exc:
logger.info(traceback.format_exc(exc))
if tries >= max_tries:
sys.exit("failed %d times to fetch %s" % (max_tries, url))
else:
logger.info("failed to fetch\n\t%s\nwill try "
"%d more times" % (url, max_tries - tries))
tries += 1
time.sleep(2 ** tries)
|
python
|
def web_search(self, query, start=0, limit=100, max_tries=3):
'''
encapsulates urllib retrieval for fetching JSON results from
Google's Custom Search API. Returns a deserialized result set.
'''
tries = 0
if isinstance(query, unicode):
query = query.encode('utf8')
url = self.url % dict(key=self.api_key,
query=urllib.quote(query.strip()),
num=min(10, limit - start),
start=start)
logger.info("fetching: %s" % url)
while 1:
try:
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
# Why do we set this? Remnant from pre-API version?
request.add_header('User-Agent', USER_AGENT)
opener = urllib2.build_opener()
fh = opener.open(request, timeout=60)
data = fh.read()
if fh.headers.get('Content-Encoding') == 'gzip':
compressedstream = StringIO.StringIO(data)
fh = gzip.GzipFile(fileobj=compressedstream)
data = fh.read()
return json.loads(data)
except Exception, exc:
logger.info(traceback.format_exc(exc))
if tries >= max_tries:
sys.exit("failed %d times to fetch %s" % (max_tries, url))
else:
logger.info("failed to fetch\n\t%s\nwill try "
"%d more times" % (url, max_tries - tries))
tries += 1
time.sleep(2 ** tries)
|
[
"def",
"web_search",
"(",
"self",
",",
"query",
",",
"start",
"=",
"0",
",",
"limit",
"=",
"100",
",",
"max_tries",
"=",
"3",
")",
":",
"tries",
"=",
"0",
"if",
"isinstance",
"(",
"query",
",",
"unicode",
")",
":",
"query",
"=",
"query",
".",
"encode",
"(",
"'utf8'",
")",
"url",
"=",
"self",
".",
"url",
"%",
"dict",
"(",
"key",
"=",
"self",
".",
"api_key",
",",
"query",
"=",
"urllib",
".",
"quote",
"(",
"query",
".",
"strip",
"(",
")",
")",
",",
"num",
"=",
"min",
"(",
"10",
",",
"limit",
"-",
"start",
")",
",",
"start",
"=",
"start",
")",
"logger",
".",
"info",
"(",
"\"fetching: %s\"",
"%",
"url",
")",
"while",
"1",
":",
"try",
":",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
")",
"request",
".",
"add_header",
"(",
"'Accept-encoding'",
",",
"'gzip'",
")",
"# We do we set this? Remnant from pre-API version?",
"request",
".",
"add_header",
"(",
"'User-Agent'",
",",
"USER_AGENT",
")",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
")",
"fh",
"=",
"opener",
".",
"open",
"(",
"request",
",",
"timeout",
"=",
"60",
")",
"data",
"=",
"fh",
".",
"read",
"(",
")",
"if",
"fh",
".",
"headers",
".",
"get",
"(",
"'Content-Encoding'",
")",
"==",
"'gzip'",
":",
"compressedstream",
"=",
"StringIO",
".",
"StringIO",
"(",
"data",
")",
"fh",
"=",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"compressedstream",
")",
"data",
"=",
"fh",
".",
"read",
"(",
")",
"return",
"json",
".",
"loads",
"(",
"data",
")",
"except",
"Exception",
",",
"exc",
":",
"logger",
".",
"info",
"(",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
")",
"if",
"tries",
">=",
"max_tries",
":",
"sys",
".",
"exit",
"(",
"\"failed %d times to fetch %s\"",
"%",
"(",
"max_tries",
",",
"url",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"failed to fetch\\n\\t%s\\nwill try \"",
"\"%d more times\"",
"%",
"(",
"url",
",",
"max_tries",
"-",
"tries",
")",
")",
"tries",
"+=",
"1",
"time",
".",
"sleep",
"(",
"2",
"**",
"tries",
")"
] |
encapsulates urllib retrieval for fetching JSON results from
Google's Custom Search API. Returns a deserialized result set.
|
[
"encapsulates",
"urllib",
"retrieval",
"for",
"fetching",
"JSON",
"results",
"from",
"Google",
"s",
"Custom",
"Search",
"API",
".",
"Returns",
"a",
"deserialized",
"result",
"set",
"."
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/openquery/google.py#L57-L93
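The retry loop in web_search above sleeps 2 ** tries seconds between attempts, i.e. exponential backoff. The shape of that loop, extracted into a reusable sketch (the names here are illustrative, not part of dossier.models):

import time

def fetch_with_backoff(fetch, url, max_tries=3):
    # Retry `fetch(url)`, doubling the wait after each failure.
    tries = 0
    while True:
        try:
            return fetch(url)
        except Exception:
            tries += 1
            if tries > max_tries:
                raise
            time.sleep(2 ** tries)  # 2s, 4s, 8s, ...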
|
244,978
|
twidi/py-dataql
|
dataql/solvers/filters.py
|
Solver.can_solve
|
def can_solve(cls, filter_):
"""Tells if the solver is able to resolve the given filter.
Arguments
---------
filter_ : subclass of dataql.resources.BaseFilter
The subclass of ``BaseFilter`` to check if it is solvable by the current solver class.
Returns
-------
boolean
``True`` if the current solver class can solve the given filter, ``False`` otherwise.
Example
-------
>>> FilterSolver.solvable_filters
(<class 'dataql.resources.Filter'>,)
>>> FilterSolver.can_solve(Filter(name='foo'))
True
>>> SliceSolver.can_solve(Filter(name='foo'))
False
"""
for solvable_filter in cls.solvable_filters:
if isinstance(filter_, solvable_filter):
return True
return False
|
python
|
def can_solve(cls, filter_):
"""Tells if the solver is able to resolve the given filter.
Arguments
---------
filter_ : subclass of dataql.resources.BaseFilter
The subclass of ``BaseFilter`` to check if it is solvable by the current solver class.
Returns
-------
boolean
``True`` if the current solver class can solve the given filter, ``False`` otherwise.
Example
-------
>>> FilterSolver.solvable_filters
(<class 'dataql.resources.Filter'>,)
>>> FilterSolver.can_solve(Filter(name='foo'))
True
>>> SliceSolver.can_solve(Filter(name='foo'))
False
"""
for solvable_filter in cls.solvable_filters:
if isinstance(filter_, solvable_filter):
return True
return False
|
[
"def",
"can_solve",
"(",
"cls",
",",
"filter_",
")",
":",
"for",
"solvable_filter",
"in",
"cls",
".",
"solvable_filters",
":",
"if",
"isinstance",
"(",
"filter_",
",",
"solvable_filter",
")",
":",
"return",
"True",
"return",
"False"
] |
Tells if the solver is able to resolve the given filter.
Arguments
---------
filter_ : subclass of dataql.resources.BaseFilter
The subclass of ``BaseFilter`` to check if it is solvable by the current solver class.
Returns
-------
boolean
``True`` if the current solver class can solve the given filter, ``False`` otherwise.
Example
-------
>>> FilterSolver.solvable_filters
(<class 'dataql.resources.Filter'>,)
>>> FilterSolver.can_solve(Filter(name='foo'))
True
>>> SliceSolver.can_solve(Filter(name='foo'))
False
|
[
"Tells",
"if",
"the",
"solver",
"is",
"able",
"to",
"resolve",
"the",
"given",
"filter",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/filters.py#L103-L131
|
244,979
|
twidi/py-dataql
|
dataql/solvers/filters.py
|
FilterSolver.solve
|
def solve(self, value, filter_):
"""Returns the value of an attribute of the value, or the result of a call to a function.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resources.Filter
An instance of ``Filter`` to solve with the given value.
Returns
-------
Depending on the source, the filter may ask for an attribute of the value, or for the
result of a call to a standalone function taking the value as first argument.
This method returns this attribute or result.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, ['day', 'strftime'])
>>> solver = FilterSolver(registry)
>>> solver.solve(date(2015, 6, 1), Filter(name='day'))
1
>>> from dataql.resources import PosArg
>>> solver.solve(date(2015, 6, 1), Filter(name='strftime', args=[PosArg('%F')]))
'2015-06-01'
"""
args, kwargs = filter_.get_args_and_kwargs()
source = self.registry[value]
return source.solve(value, filter_.name, args, kwargs)
|
python
|
def solve(self, value, filter_):
"""Returns the value of an attribute of the value, or the result of a call to a function.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resources.Filter
An instance of ``Filter`` to solve with the given value.
Returns
-------
Depending on the source, the filter may ask for an attribute of the value, or for the
result of a call to a standalone function taking the value as first argument.
This method returns this attribute or result.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, ['day', 'strftime'])
>>> solver = FilterSolver(registry)
>>> solver.solve(date(2015, 6, 1), Filter(name='day'))
1
>>> from dataql.resources import PosArg
>>> solver.solve(date(2015, 6, 1), Filter(name='strftime', args=[PosArg('%F')]))
'2015-06-01'
"""
args, kwargs = filter_.get_args_and_kwargs()
source = self.registry[value]
return source.solve(value, filter_.name, args, kwargs)
|
[
"def",
"solve",
"(",
"self",
",",
"value",
",",
"filter_",
")",
":",
"args",
",",
"kwargs",
"=",
"filter_",
".",
"get_args_and_kwargs",
"(",
")",
"source",
"=",
"self",
".",
"registry",
"[",
"value",
"]",
"return",
"source",
".",
"solve",
"(",
"value",
",",
"filter_",
".",
"name",
",",
"args",
",",
"kwargs",
")"
] |
Returns the value of an attribute of the value, or the result of a call to a function.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resources.Filter
An instance of ``Filter`` to solve with the given value.
Returns
-------
Depending on the source, the filter may ask for an attribute of the value, or for the
result of a call to a standalone function taking the value as first argument.
This method returns this attribute or result.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, ['day', 'strftime'])
>>> solver = FilterSolver(registry)
>>> solver.solve(date(2015, 6, 1), Filter(name='day'))
1
>>> from dataql.resources import PosArg
>>> solver.solve(date(2015, 6, 1), Filter(name='strftime', args=[PosArg('%F')]))
'2015-06-01'
|
[
"Returns",
"the",
"value",
"of",
"an",
"attribute",
"of",
"the",
"value",
"or",
"the",
"result",
"of",
"a",
"call",
"to",
"a",
"function",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/filters.py#L153-L187
|
244,980
|
twidi/py-dataql
|
dataql/solvers/filters.py
|
SliceSolver.solve
|
def solve(self, value, filter_):
"""Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resources.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4))
"""
try:
return value[filter_.slice or filter_.index]
except IndexError:
return None
|
python
|
def solve(self, value, filter_):
"""Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resources.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4))
"""
try:
return value[filter_.slice or filter_.index]
except IndexError:
return None
|
[
"def",
"solve",
"(",
"self",
",",
"value",
",",
"filter_",
")",
":",
"try",
":",
"return",
"value",
"[",
"filter_",
".",
"slice",
"or",
"filter_",
".",
"index",
"]",
"except",
"IndexError",
":",
"return",
"None"
] |
Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resources.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4))
|
[
"Get",
"slice",
"or",
"entry",
"defined",
"by",
"an",
"index",
"from",
"the",
"given",
"value",
"."
] |
5841a3fd559829193ed709c255166085bdde1c52
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/filters.py#L209-L238
|
244,981
|
RazerM/bucketcache
|
bucketcache/utilities.py
|
normalize_args
|
def normalize_args(f, *args, **kwargs):
"""Normalize call arguments into keyword form and varargs.
args can only be non-empty if there is *args in the argument specification.
"""
callargs = inspect.getcallargs(f, *args, **kwargs)
original_callargs = callargs.copy()
try:
argspec = inspect.getargspec(f)
except ValueError:
argspec = inspect.getfullargspec(f)
else:
argspec = fullargspec_from_argspec(argspec)
if hasattr(argspec, 'varkw'):
if argspec.varkw:
kwargs = callargs.pop(argspec.varkw, {})
callargs.update(kwargs)
if argspec.varargs:
varargs = callargs.pop(argspec.varargs, ())
else:
varargs = ()
# now callargs is all keywords
return NormalizedArgs(varargs=varargs,
normargs=callargs,
callargs=original_callargs)
|
python
|
def normalize_args(f, *args, **kwargs):
"""Normalize call arguments into keyword form and varargs.
args can only be non-empty if there is *args in the argument specification.
"""
callargs = inspect.getcallargs(f, *args, **kwargs)
original_callargs = callargs.copy()
try:
argspec = inspect.getargspec(f)
except ValueError:
argspec = inspect.getfullargspec(f)
else:
argspec = fullargspec_from_argspec(argspec)
if hasattr(argspec, 'varkw'):
if argspec.varkw:
kwargs = callargs.pop(argspec.varkw, {})
callargs.update(kwargs)
if argspec.varargs:
varargs = callargs.pop(argspec.varargs, ())
else:
varargs = ()
# now callargs is all keywords
return NormalizedArgs(varargs=varargs,
normargs=callargs,
callargs=original_callargs)
|
[
"def",
"normalize_args",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"callargs",
"=",
"inspect",
".",
"getcallargs",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"original_callargs",
"=",
"callargs",
".",
"copy",
"(",
")",
"try",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"f",
")",
"except",
"ValueError",
":",
"argspec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"f",
")",
"else",
":",
"argspec",
"=",
"fullargspec_from_argspec",
"(",
"argspec",
")",
"if",
"hasattr",
"(",
"argspec",
",",
"'varkw'",
")",
":",
"if",
"argspec",
".",
"varkw",
":",
"kwargs",
"=",
"callargs",
".",
"pop",
"(",
"argspec",
".",
"varkw",
",",
"{",
"}",
")",
"callargs",
".",
"update",
"(",
"kwargs",
")",
"if",
"argspec",
".",
"varargs",
":",
"varargs",
"=",
"callargs",
".",
"pop",
"(",
"argspec",
".",
"varargs",
",",
"(",
")",
")",
"else",
":",
"varargs",
"=",
"(",
")",
"# now callargs is all keywords",
"return",
"NormalizedArgs",
"(",
"varargs",
"=",
"varargs",
",",
"normargs",
"=",
"callargs",
",",
"callargs",
"=",
"original_callargs",
")"
] |
Normalize call arguments into keyword form and varargs.
args can only be non-empty if there is *args in the argument specification.
|
[
"Normalize",
"call",
"arguments",
"into",
"keyword",
"form",
"and",
"varargs",
"."
] |
8d9b163b73da8c498793cce2f22f6a7cbe524d94
|
https://github.com/RazerM/bucketcache/blob/8d9b163b73da8c498793cce2f22f6a7cbe524d94/bucketcache/utilities.py#L233-L261
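Concretely, normalize_args folds everything except *args into one flat keyword mapping. Assuming NormalizedArgs is the namedtuple the return statement above implies, a call would shake out like this:

def f(a, b=2, *args, **kwargs):
    pass

result = normalize_args(f, 1, 3, 4, x=5)
# varargs  == (4,)
# normargs == {'a': 1, 'b': 3, 'x': 5}
# callargs == {'a': 1, 'b': 3, 'args': (4,), 'kwargs': {'x': 5}}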
|
244,982
|
Who8MyLunch/OrderedNamespace
|
ordered_namespace/core.py
|
Struct.update
|
def update(self, *args, **kwargs):
"""Update self with new content
"""
d = {}
d.update(*args, **kwargs)
for key, value in d.items():
self[key] = value
|
python
|
def update(self, *args, **kwargs):
"""Update self with new content
"""
d = {}
d.update(*args, **kwargs)
for key, value in d.items():
self[key] = value
|
[
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"d",
"=",
"{",
"}",
"d",
".",
"update",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"key",
",",
"value",
"in",
"d",
".",
"items",
"(",
")",
":",
"self",
"[",
"key",
"]",
"=",
"value"
] |
Update self with new content
|
[
"Update",
"self",
"with",
"new",
"content"
] |
f14b7e76afe3379f1696c96e8d06ef6fbf923f00
|
https://github.com/Who8MyLunch/OrderedNamespace/blob/f14b7e76afe3379f1696c96e8d06ef6fbf923f00/ordered_namespace/core.py#L46-L52
|
244,983
|
Who8MyLunch/OrderedNamespace
|
ordered_namespace/core.py
|
Struct.asdict
|
def asdict(self):
"""Return a recursive dict representation of self
"""
d = dict(self._odict)
for k,v in d.items():
if isinstance(v, Struct):
d[k] = v.asdict()
return d
|
python
|
def asdict(self):
"""Return a recursive dict representation of self
"""
d = dict(self._odict)
for k,v in d.items():
if isinstance(v, Struct):
d[k] = v.asdict()
return d
|
[
"def",
"asdict",
"(",
"self",
")",
":",
"d",
"=",
"dict",
"(",
"self",
".",
"_odict",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Struct",
")",
":",
"d",
"[",
"k",
"]",
"=",
"v",
".",
"asdict",
"(",
")",
"return",
"d"
] |
Return a recursive dict representation of self
|
[
"Return",
"a",
"recursive",
"dict",
"representation",
"of",
"self"
] |
f14b7e76afe3379f1696c96e8d06ef6fbf923f00
|
https://github.com/Who8MyLunch/OrderedNamespace/blob/f14b7e76afe3379f1696c96e8d06ef6fbf923f00/ordered_namespace/core.py#L68-L77
|
244,984
|
lsst-sqre/BitlyOAuth2ProxySession
|
BitlyOAuth2ProxySession/Session.py
|
_extract_authenticity_token
|
def _extract_authenticity_token(data):
"""Don't look, I'm hideous!"""
# Super-cheap Python3 hack.
if not isinstance(data, str):
data = str(data, 'utf-8')
pos = data.find("authenticity_token")
# Super-gross.
authtok = str(data[pos + 41:pos + 41 + 88])
return authtok
|
python
|
def _extract_authenticity_token(data):
"""Don't look, I'm hideous!"""
# Super-cheap Python3 hack.
if not isinstance(data, str):
data = str(data, 'utf-8')
pos = data.find("authenticity_token")
# Super-gross.
authtok = str(data[pos + 41:pos + 41 + 88])
return authtok
|
[
"def",
"_extract_authenticity_token",
"(",
"data",
")",
":",
"# Super-cheap Python3 hack.",
"if",
"not",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"data",
"=",
"str",
"(",
"data",
",",
"'utf-8'",
")",
"pos",
"=",
"data",
".",
"find",
"(",
"\"authenticity_token\"",
")",
"# Super-gross.",
"authtok",
"=",
"str",
"(",
"data",
"[",
"pos",
"+",
"41",
":",
"pos",
"+",
"41",
"+",
"88",
"]",
")",
"return",
"authtok"
] |
Don't look, I'm hideous!
|
[
"Don",
"t",
"look",
"I",
"m",
"hideous!"
] |
4d3839cfb9b897f46cffc41a5f6ff7c645a5f202
|
https://github.com/lsst-sqre/BitlyOAuth2ProxySession/blob/4d3839cfb9b897f46cffc41a5f6ff7c645a5f202/BitlyOAuth2ProxySession/Session.py#L87-L95
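The fixed offsets (41 and 88) above are tied to one particular page layout, which is what the docstring is apologizing for. A less position-dependent alternative would match the token's HTML attribute instead; this is a sketch and assumes the token is rendered as name="authenticity_token" value="...", which the actual page may not guarantee:

import re

def extract_authenticity_token(data):
    if not isinstance(data, str):
        data = str(data, 'utf-8')
    # Capture the value attribute rather than slicing at fixed offsets.
    match = re.search(r'name="authenticity_token"\s+value="([^"]+)"', data)
    return match.group(1) if match else None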
|
244,985
|
lsst-sqre/BitlyOAuth2ProxySession
|
BitlyOAuth2ProxySession/Session.py
|
Session.authenticate
|
def authenticate(self):
"""Authenticate the session"""
postdata = self.authentication_postdata
jar = requests.cookies.cookielib.CookieJar()
self.cookies = jar
resp = self.get(self.authentication_base_url)
authtok = _extract_authenticity_token(resp.content)
if postdata is None:
# This works for GitHub
postdata = {"login": self.oauth2_username,
"password": self._oauth2_password,
"authenticity_token": authtok,
"commit": "Sign+in",
"utf8": u"\u2713",
} # pylint: disable=bad-continuation
self.authentication_postdata = postdata
if self.authentication_session_url is None:
# This is also for GitHub
authentication_session_url = "https://github.com/session"
self.authentication_session_url = authentication_session_url
self.post(self.authentication_session_url, data=postdata)
|
python
|
def authenticate(self):
"""Authenticate the session"""
postdata = self.authentication_postdata
jar = requests.cookies.cookielib.CookieJar()
self.cookies = jar
resp = self.get(self.authentication_base_url)
authtok = _extract_authenticity_token(resp.content)
if postdata is None:
# This works for GitHub
postdata = {"login": self.oauth2_username,
"password": self._oauth2_password,
"authenticity_token": authtok,
"commit": "Sign+in",
"utf8": u"\u2713",
} # pylint: disable=bad-continuation
self.authentication_postdata = postdata
if self.authentication_session_url is None:
# This is also for GitHub
authentication_session_url = "https://github.com/session"
self.authentication_session_url = authentication_session_url
self.post(self.authentication_session_url, data=postdata)
|
[
"def",
"authenticate",
"(",
"self",
")",
":",
"postdata",
"=",
"self",
".",
"authentication_postdata",
"jar",
"=",
"requests",
".",
"cookies",
".",
"cookielib",
".",
"CookieJar",
"(",
")",
"self",
".",
"cookies",
"=",
"jar",
"resp",
"=",
"self",
".",
"get",
"(",
"self",
".",
"authentication_base_url",
")",
"authtok",
"=",
"_extract_authenticity_token",
"(",
"resp",
".",
"content",
")",
"if",
"postdata",
"is",
"None",
":",
"# This works for GitHub",
"postdata",
"=",
"{",
"\"login\"",
":",
"self",
".",
"oauth2_username",
",",
"\"password\"",
":",
"self",
".",
"_oauth2_password",
",",
"\"authenticity_token\"",
":",
"authtok",
",",
"\"commit\"",
":",
"\"Sign+in\"",
",",
"\"utf8\"",
":",
"u\"\\u2713\"",
",",
"}",
"# pylint: disable=bad-continuation",
"self",
".",
"authentication_postdata",
"=",
"postdata",
"if",
"self",
".",
"authentication_session_url",
"is",
"None",
":",
"# This is also for GitHub",
"authentication_session_url",
"=",
"\"https://github.com/session\"",
"self",
".",
"authentication_session_url",
"=",
"authentication_session_url",
"self",
".",
"post",
"(",
"self",
".",
"authentication_session_url",
",",
"data",
"=",
"postdata",
")"
] |
Authenticate the session
|
[
"Authenticate",
"the",
"session"
] |
4d3839cfb9b897f46cffc41a5f6ff7c645a5f202
|
https://github.com/lsst-sqre/BitlyOAuth2ProxySession/blob/4d3839cfb9b897f46cffc41a5f6ff7c645a5f202/BitlyOAuth2ProxySession/Session.py#L64-L84
|
244,986
|
opieters/SnakeTeX
|
snaketex/stex.py
|
cli
|
def cli(ctx, config, debug):
"""SnakTeX command line interface - write LaTeX faster through templating."""
ctx.obj['config'] = config
ctx.obj['engine'] = stex.SnakeTeX(config_file=config, debug=debug)
|
python
|
def cli(ctx, config, debug):
"""SnakTeX command line interface - write LaTeX faster through templating."""
ctx.obj['config'] = config
ctx.obj['engine'] = stex.SnakeTeX(config_file=config, debug=debug)
|
[
"def",
"cli",
"(",
"ctx",
",",
"config",
",",
"debug",
")",
":",
"ctx",
".",
"obj",
"[",
"'config'",
"]",
"=",
"config",
"ctx",
".",
"obj",
"[",
"'engine'",
"]",
"=",
"stex",
".",
"SnakeTeX",
"(",
"config_file",
"=",
"config",
",",
"debug",
"=",
"debug",
")"
] |
SnakeTeX command line interface - write LaTeX faster through templating.
|
[
"SnakTeX",
"command",
"line",
"interface",
"-",
"write",
"LaTeX",
"faster",
"through",
"templating",
"."
] |
0ceba577a5bf2ff6bb686c426a6f5b1393b99471
|
https://github.com/opieters/SnakeTeX/blob/0ceba577a5bf2ff6bb686c426a6f5b1393b99471/snaketex/stex.py#L10-L13
|
244,987
|
etcher-be/emiz
|
emiz/miz.py
|
Miz.zip
|
def zip(self, destination: typing.Union[str, Path] = None, encode: bool = True) -> str:
"""
Write mission, dictionary etc. to a MIZ file
Args:
destination: target MIZ file (if none, defaults to source MIZ + "_EMIZ")
Returns: destination file
"""
if encode:
self._encode()
if destination is None:
destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')
else:
destination_path = elib.path.ensure_file(destination, must_exist=False)
LOGGER.debug('zipping mission to: %s', destination_path)
destination_path.write_bytes(dummy_miz)
with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:
for root, _, items in os.walk(self.temp_dir.absolute()):
for item in items:
item_abs_path = Path(root, item).absolute()
item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)
zip_file.write(item_abs_path, arcname=item_rel_path)
return str(destination_path)
|
python
|
def zip(self, destination: typing.Union[str, Path] = None, encode: bool = True) -> str:
"""
Write mission, dictionary etc. to a MIZ file
Args:
destination: target MIZ file (if none, defaults to source MIZ + "_EMIZ")
Returns: destination file
"""
if encode:
self._encode()
if destination is None:
destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')
else:
destination_path = elib.path.ensure_file(destination, must_exist=False)
LOGGER.debug('zipping mission to: %s', destination_path)
destination_path.write_bytes(dummy_miz)
with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:
for root, _, items in os.walk(self.temp_dir.absolute()):
for item in items:
item_abs_path = Path(root, item).absolute()
item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)
zip_file.write(item_abs_path, arcname=item_rel_path)
return str(destination_path)
|
[
"def",
"zip",
"(",
"self",
",",
"destination",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"Path",
"]",
"=",
"None",
",",
"encode",
":",
"bool",
"=",
"True",
")",
"->",
"str",
":",
"if",
"encode",
":",
"self",
".",
"_encode",
"(",
")",
"if",
"destination",
"is",
"None",
":",
"destination_path",
"=",
"self",
".",
"miz_path",
".",
"parent",
".",
"joinpath",
"(",
"f'{self.miz_path.stem}_EMIZ.miz'",
")",
"else",
":",
"destination_path",
"=",
"elib",
".",
"path",
".",
"ensure_file",
"(",
"destination",
",",
"must_exist",
"=",
"False",
")",
"LOGGER",
".",
"debug",
"(",
"'zipping mission to: %s'",
",",
"destination_path",
")",
"destination_path",
".",
"write_bytes",
"(",
"dummy_miz",
")",
"with",
"ZipFile",
"(",
"str",
"(",
"destination_path",
")",
",",
"mode",
"=",
"'w'",
",",
"compression",
"=",
"8",
")",
"as",
"zip_file",
":",
"for",
"root",
",",
"_",
",",
"items",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"temp_dir",
".",
"absolute",
"(",
")",
")",
":",
"for",
"item",
"in",
"items",
":",
"item_abs_path",
"=",
"Path",
"(",
"root",
",",
"item",
")",
".",
"absolute",
"(",
")",
"item_rel_path",
"=",
"Path",
"(",
"item_abs_path",
")",
".",
"relative_to",
"(",
"self",
".",
"temp_dir",
")",
"zip_file",
".",
"write",
"(",
"item_abs_path",
",",
"arcname",
"=",
"item_rel_path",
")",
"return",
"str",
"(",
"destination_path",
")"
] |
Write mission, dictionary etc. to a MIZ file
Args:
destination: target MIZ file (if none, defaults to source MIZ + "_EMIZ")
Returns: destination file
|
[
"Write",
"mission",
"dictionary",
"etc",
".",
"to",
"a",
"MIZ",
"file"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/miz.py#L334-L364
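The arcname argument in Miz.zip above is what keeps absolute temp-dir paths out of the archive: each member is stored under its path relative to the tree root. The same walk-and-relativize pattern in isolation (compression=8 is zipfile.ZIP_DEFLATED):

import os
from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED

def zip_tree(src_dir, dest):
    src_dir = Path(src_dir)
    with ZipFile(dest, mode='w', compression=ZIP_DEFLATED) as zf:
        for root, _, files in os.walk(src_dir):
            for name in files:
                abs_path = Path(root, name)
                # Store 'sub/file.txt', not '/tmp/xyz/sub/file.txt'.
                zf.write(abs_path, arcname=abs_path.relative_to(src_dir))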
|
244,988
|
universalcore/springboard
|
springboard/utils.py
|
config_dict
|
def config_dict(data):
"""
A function that takes a string of pair values, indicated by '=', separated
by newline characters and returns a dict of those value pairs
:param str data:
A string of '='-separated key/value pairs, one pair per line
:returns:
A dict containing the value pairs separated by newline characters
"""
lines = config_list(data)
return dict(re.split('\s*=\s*', value) for value in lines)
|
python
|
def config_dict(data):
"""
A function that takes a string of pair values, indicated by '=', separated
by newline characters and returns a dict of those value pairs
:param str data:
A string of '='-separated key/value pairs, one pair per line
:returns:
A dict containing the value pairs separated by newline characters
"""
lines = config_list(data)
return dict(re.split('\s*=\s*', value) for value in lines)
|
[
"def",
"config_dict",
"(",
"data",
")",
":",
"lines",
"=",
"config_list",
"(",
"data",
")",
"return",
"dict",
"(",
"re",
".",
"split",
"(",
"'\\s*=\\s*'",
",",
"value",
")",
"for",
"value",
"in",
"lines",
")"
] |
A function that takes a string of pair values, indicated by '=', separated
by newline characters and returns a dict of those value pairs
:param str data:
A string of '='-separated key/value pairs, one pair per line
:returns:
A dict containing the value pairs separated by newline characters
|
[
"A",
"function",
"that",
"takes",
"a",
"string",
"of",
"pair",
"values",
"indicated",
"by",
"=",
"separated",
"by",
"newline",
"characters",
"and",
"returns",
"a",
"dict",
"of",
"those",
"value",
"pairs"
] |
9f0393d310a1d2219613e8cd30cc52c75fb4dd61
|
https://github.com/universalcore/springboard/blob/9f0393d310a1d2219613e8cd30cc52c75fb4dd61/springboard/utils.py#L94-L108
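config_list is defined elsewhere in springboard.utils and is not shown in this record; assuming it simply splits the input on newlines and drops blank lines, the round trip looks like this self-contained sketch:

import re

def config_dict_sketch(data):
    # Stand-in for config_list: split lines, drop blanks.
    lines = [line.strip() for line in data.splitlines() if line.strip()]
    return dict(re.split(r'\s*=\s*', value) for value in lines)

print(config_dict_sketch("foo = 1\nbar=2"))  # {'foo': '1', 'bar': '2'}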
|
244,989
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/writer.py
|
SoapWriter.Forget
|
def Forget(self, obj):
'''Forget we've seen this object.
'''
obj = _get_idstr(obj)
try:
self.memo.remove(obj)
except ValueError:
pass
|
python
|
def Forget(self, obj):
'''Forget we've seen this object.
'''
obj = _get_idstr(obj)
try:
self.memo.remove(obj)
except ValueError:
pass
|
[
"def",
"Forget",
"(",
"self",
",",
"obj",
")",
":",
"obj",
"=",
"_get_idstr",
"(",
"obj",
")",
"try",
":",
"self",
".",
"memo",
".",
"remove",
"(",
"obj",
")",
"except",
"ValueError",
":",
"pass"
] |
Forget we've seen this object.
|
[
"Forget",
"we",
"ve",
"seen",
"this",
"object",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/writer.py#L163-L170
|
244,990
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/writer.py
|
SoapWriter.close
|
def close(self):
'''Invoke all the callbacks, and close off the SOAP message.
'''
if self.closed: return
for func,arglist in self.callbacks:
apply(func, arglist)
self.closed = True
|
python
|
def close(self):
'''Invoke all the callbacks, and close off the SOAP message.
'''
if self.closed: return
for func,arglist in self.callbacks:
apply(func, arglist)
self.closed = True
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"closed",
":",
"return",
"for",
"func",
",",
"arglist",
"in",
"self",
".",
"callbacks",
":",
"apply",
"(",
"func",
",",
"arglist",
")",
"self",
".",
"closed",
"=",
"True"
] |
Invoke all the callbacks, and close off the SOAP message.
|
[
"Invoke",
"all",
"the",
"callbacks",
"and",
"close",
"off",
"the",
"SOAP",
"message",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/writer.py#L178-L184
|
244,991
|
spotify/gordon-janitor
|
gordon_janitor/main.py
|
setup
|
def setup(config_root=''):
"""
Service configuration and logging setup.
Configuration defined in ``gordon-janitor-user.toml`` will overwrite
``gordon-janitor.toml``.
Args:
config_root (str): where configuration should load from,
defaults to current working directory.
Returns:
A dict for Gordon service configuration
"""
config = _load_config(root=config_root)
logging_config = config.get('core', {}).get('logging', {})
log_level = logging_config.get('level', 'INFO').upper()
log_handlers = logging_config.get('handlers') or ['syslog']
ulogger.setup_logging(
progname='gordon-janitor', level=log_level, handlers=log_handlers)
return config
|
python
|
def setup(config_root=''):
"""
Service configuration and logging setup.
Configuration defined in ``gordon-janitor-user.toml`` will overwrite
``gordon-janitor.toml``.
Args:
config_root (str): where configuration should load from,
defaults to current working directory.
Returns:
A dict for Gordon service configuration
"""
config = _load_config(root=config_root)
logging_config = config.get('core', {}).get('logging', {})
log_level = logging_config.get('level', 'INFO').upper()
log_handlers = logging_config.get('handlers') or ['syslog']
ulogger.setup_logging(
progname='gordon-janitor', level=log_level, handlers=log_handlers)
return config
|
[
"def",
"setup",
"(",
"config_root",
"=",
"''",
")",
":",
"config",
"=",
"_load_config",
"(",
"root",
"=",
"config_root",
")",
"logging_config",
"=",
"config",
".",
"get",
"(",
"'core'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'logging'",
",",
"{",
"}",
")",
"log_level",
"=",
"logging_config",
".",
"get",
"(",
"'level'",
",",
"'INFO'",
")",
".",
"upper",
"(",
")",
"log_handlers",
"=",
"logging_config",
".",
"get",
"(",
"'handlers'",
")",
"or",
"[",
"'syslog'",
"]",
"ulogger",
".",
"setup_logging",
"(",
"progname",
"=",
"'gordon-janitor'",
",",
"level",
"=",
"log_level",
",",
"handlers",
"=",
"log_handlers",
")",
"return",
"config"
] |
Service configuration and logging setup.
Configuration defined in ``gordon-janitor-user.toml`` will overwrite
``gordon-janitor.toml``.
Args:
config_root (str): where configuration should load from,
defaults to current working directory.
Returns:
A dict for Gordon service configuration
|
[
"Service",
"configuration",
"and",
"logging",
"setup",
"."
] |
e0df2002caf3aac528818743d8d0717790957044
|
https://github.com/spotify/gordon-janitor/blob/e0df2002caf3aac528818743d8d0717790957044/gordon_janitor/main.py#L76-L98
|
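A sketch of the configuration shape setup consumes. The TOML fragment is an assumption inferred from the keys the body reads (core.logging.level and core.logging.handlers); only the dict plumbing below is taken directly from the function:

# Hypothetical gordon-janitor.toml fragment:
#
#   [core.logging]
#   level = "debug"
#   handlers = ["stream"]
#
# which _load_config would surface as the equivalent dict:
config = {'core': {'logging': {'level': 'debug', 'handlers': ['stream']}}}

logging_config = config.get('core', {}).get('logging', {})
log_level = logging_config.get('level', 'INFO').upper()      # 'DEBUG'
log_handlers = logging_config.get('handlers') or ['syslog']  # ['stream']
assert (log_level, log_handlers) == ('DEBUG', ['stream'])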
244,992
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/__init__.py
|
STATUSES.by_value
|
def by_value(self, value, default=None):
"""
Returns the key for the given value
"""
try:
return [k for k, v in self.items() if v == value][0]
except IndexError:
if default is not None:
return default
raise ValueError('%s' % value)
|
python
|
def by_value(self, value, default=None):
"""
Returns the key for the given value
"""
try:
return [k for k, v in self.items() if v == value][0]
except IndexError:
if default is not None:
return default
raise ValueError('%s' % value)
|
[
"def",
"by_value",
"(",
"self",
",",
"value",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"return",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"v",
"==",
"value",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"if",
"default",
"is",
"not",
"None",
":",
"return",
"default",
"raise",
"ValueError",
"(",
"'%s'",
"%",
"value",
")"
] |
Returns the key for the given value
|
[
"Returns",
"the",
"key",
"for",
"the",
"given",
"value"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/__init__.py#L21-L30
|
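The reverse lookup in standalone form, with an illustrative mapping standing in for a STATUSES instance:

def by_value(mapping, value, default=None):
    # Same logic as the method above, lifted out for demonstration.
    try:
        return [k for k, v in mapping.items() if v == value][0]
    except IndexError:
        if default is not None:
            return default
        raise ValueError('%s' % value)

statuses = {'WAITING': 'w', 'RUNNING': 'r', 'SUCCESS': 's'}  # assumed value shape
assert by_value(statuses, 'r') == 'RUNNING'
assert by_value(statuses, 'x', default='UNKNOWN') == 'UNKNOWN'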
244,993
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_pattern
|
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
"""
        Add a recursive folder scan using linux-style patterns.
        :param pattern: pattern or list of patterns to match.
        :param root: root to start from (defaults to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
|
python
|
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
"""
        Add a recursive folder scan using linux-style patterns.
        :param pattern: pattern or list of patterns to match.
        :param root: root to start from (defaults to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
|
[
"def",
"add_pattern",
"(",
"self",
",",
"pattern",
",",
"root",
"=",
"\".\"",
",",
"depth",
"=",
"None",
",",
"source_type",
"=",
"DefaultSourceType",
")",
":",
"self",
".",
"add_source",
"(",
"PatternSource",
"(",
"pattern",
",",
"root",
",",
"depth",
",",
"*",
"*",
"source_type",
")",
")",
"return",
"self"
] |
Add a recursive folder scan using linux-style patterns.
:param pattern: pattern or list of patterns to match.
:param root: root to start from (defaults to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
|
[
"Add",
"a",
"recursive",
"folder",
"scan",
"using",
"a",
"linux",
"-",
"style",
"patterns",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L98-L108
|
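A hedged usage sketch: because add_pattern returns self, calls chain. ListFiles is a hypothetical subclass that supplies the process_file hook (its signature is inferred from the next() record further below), and the pattern strings are illustrative:

from fileter.files_iterator import FilesIterator

class ListFiles(FilesIterator):
    # Hypothetical subclass: hand each filename back to the caller unchanged.
    def process_file(self, filename, dryrun):
        return filename

it = ListFiles()
it.add_pattern('*.py', root='src') \
  .add_pattern(['*.md', '*.rst'], root='docs', depth=0)  # docs top level only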
244,994
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filtered_folder
|
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively, with a regex filter on directories.
        :param path: root folder to scan recursively.
        :param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
|
python
|
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively, with a regex filter on directories.
        :param path: root folder to scan recursively.
        :param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
|
[
"def",
"add_filtered_folder",
"(",
"self",
",",
"path",
",",
"regex",
",",
"depth",
"=",
"None",
",",
"source_type",
"=",
"DefaultSourceType",
")",
":",
"self",
".",
"add_source",
"(",
"FilteredFolderSource",
"(",
"path",
",",
"regex",
",",
"depth",
",",
"*",
"*",
"source_type",
")",
")",
"return",
"self"
] |
Add a folder source to scan recursively, with a regex filter on directories.
:param path: root folder to scan recursively.
:param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
|
[
"Add",
"a",
"folder",
"source",
"to",
"scan",
"recursively",
"with",
"a",
"regex",
"filter",
"on",
"directories",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L110-L119
|
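A one-line sketch reusing the hypothetical ListFiles subclass from above; the regex is illustrative, and whether it is applied to the folder name or the full path is a detail of FilteredFolderSource:

# Scan ./src at most three levels deep, skipping directories named 'tests':
it = ListFiles().add_filtered_folder('src', r'^(?!tests$).*', depth=3)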
244,995
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter
|
def add_filter(self, files_filter, filter_type=DefaultFilterType):
"""
Add a files filter to this iterator.
        For a file to be processed, it must match ALL filters, that is, they are combined with AND, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
"""
self.__filters.append((files_filter, filter_type))
return self
|
python
|
def add_filter(self, files_filter, filter_type=DefaultFilterType):
"""
Add a files filter to this iterator.
        For a file to be processed, it must match ALL filters, that is, they are combined with AND, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
"""
self.__filters.append((files_filter, filter_type))
return self
|
[
"def",
"add_filter",
"(",
"self",
",",
"files_filter",
",",
"filter_type",
"=",
"DefaultFilterType",
")",
":",
"self",
".",
"__filters",
".",
"append",
"(",
"(",
"files_filter",
",",
"filter_type",
")",
")",
"return",
"self"
] |
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, that is, they are combined with AND, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
|
[
"Add",
"a",
"files",
"filter",
"to",
"this",
"iterator",
".",
"For",
"a",
"file",
"to",
"be",
"processed",
"it",
"must",
"match",
"ALL",
"filters",
"eg",
"they",
"are",
"added",
"with",
"ADD",
"not",
"OR",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L121-L130
|
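A sketch of a custom filter. The import path and the FilterAPI hook name `match` are assumptions, so check them against the fileter sources before relying on this:

import os
from fileter import filters  # assumed import path for FilterAPI

class NoHiddenFiles(filters.FilterAPI):
    # Hypothetical filter: reject dotfiles. The hook name is assumed.
    def match(self, path):
        return not os.path.basename(path).startswith('.')

it = ListFiles().add_pattern('*', root='.') \
                .add_filter(NoHiddenFiles())  # ANDed with any other filters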
244,996
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter_by_pattern
|
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
|
python
|
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
|
[
"def",
"add_filter_by_pattern",
"(",
"self",
",",
"pattern",
",",
"filter_type",
"=",
"DefaultFilterType",
")",
":",
"self",
".",
"add_filter",
"(",
"FilterPattern",
"(",
"pattern",
")",
",",
"filter_type",
")",
"return",
"self"
] |
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
|
[
"Add",
"a",
"files",
"filter",
"by",
"linux",
"-",
"style",
"pattern",
"to",
"this",
"iterator",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L132-L139
|
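Usage is a single chained call on the iterator built above; the patterns are illustrative:

it.add_filter_by_pattern(['*.py', '*.sh'])  # keep only files matching these patterns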
244,997
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter_by_regex
|
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
|
python
|
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
|
[
"def",
"add_filter_by_regex",
"(",
"self",
",",
"regex_expression",
",",
"filter_type",
"=",
"DefaultFilterType",
")",
":",
"self",
".",
"add_filter",
"(",
"FilterRegex",
"(",
"regex_expression",
")",
",",
"filter_type",
")",
"return",
"self"
] |
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
|
[
"Add",
"a",
"files",
"filter",
"by",
"regex",
"to",
"this",
"iterator",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L141-L148
|
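Likewise for the regex variant; the expression below keeps test modules and is purely illustrative:

it.add_filter_by_regex(r'.*_test\.py$')  # keep files ending in _test.py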
244,998
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter_by_extension
|
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
"""
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
"""
self.add_filter(FilterExtension(extensions), filter_type)
return self
|
python
|
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
"""
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
"""
self.add_filter(FilterExtension(extensions), filter_type)
return self
|
[
"def",
"add_filter_by_extension",
"(",
"self",
",",
"extensions",
",",
"filter_type",
"=",
"DefaultFilterType",
")",
":",
"self",
".",
"add_filter",
"(",
"FilterExtension",
"(",
"extensions",
")",
",",
"filter_type",
")",
"return",
"self"
] |
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
|
[
"Add",
"a",
"files",
"filter",
"by",
"extensions",
"to",
"this",
"iterator",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L150-L158
|
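And for extensions, passed without the leading dot, matching the docstring's own example list:

it.add_filter_by_extension(['py', 'js', 'cpp'])  # keep only these extensions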
244,999
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.next
|
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
        :param dryrun: if true, will only return all filenames instead of processing them, e.g. will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
        # call the end iteration hook and finish the generator
        self.on_end(dryrun)
        return
|
python
|
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
        :param dryrun: if true, will only return all filenames instead of processing them, e.g. will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
                # make sure file passes filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
        # call the end iteration hook and finish the generator
        self.on_end(dryrun)
        return
|
[
"def",
"next",
"(",
"self",
",",
"dryrun",
"=",
"False",
")",
":",
"# call the start hook",
"self",
".",
"on_start",
"(",
"dryrun",
")",
"# store current dir",
"curr_dir",
"=",
"\"\"",
"# iterate over sources",
"for",
"src",
"in",
"self",
".",
"__sources",
":",
"# call the start_source hook",
"self",
".",
"on_start_source",
"(",
"src",
",",
"dryrun",
")",
"# iterate over files",
"for",
"filename",
"in",
"src",
".",
"next",
"(",
")",
":",
"# make sure file pass filters",
"if",
"not",
"self",
".",
"match_filters",
"(",
"filename",
")",
":",
"continue",
"# get curr dir to call the directory-enter hook",
"new_curr_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"new_curr_dir",
"!=",
"curr_dir",
":",
"self",
".",
"on_enter_dir",
"(",
"new_curr_dir",
",",
"dryrun",
")",
"curr_dir",
"=",
"new_curr_dir",
"# process file",
"curr",
"=",
"self",
".",
"process_file",
"(",
"filename",
",",
"dryrun",
")",
"# if after process we still want to return file for external iteration, return it",
"if",
"curr",
"is",
"not",
"None",
":",
"yield",
"curr",
"# call the end-source hook",
"self",
".",
"on_end_source",
"(",
"src",
",",
"dryrun",
")",
"# call the end iteration hook and raise stop iteration exception",
"self",
".",
"on_end",
"(",
"dryrun",
")",
"raise",
"StopIteration"
] |
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, e.g. will not
call "process_file" at all, and just show all the files it will scan.
|
[
"Iterate",
"over",
"files",
"in",
"all",
"sources",
".",
"Use",
"this",
"if",
"you",
"want",
"to",
"iterate",
"files",
"externally",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L190-L236
|
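External iteration, tying the pieces together with the hypothetical ListFiles subclass defined earlier. Ending the generator with a bare return matters here: from Python 3.7 (PEP 479), raising StopIteration inside a generator body is converted to a RuntimeError, so the Python 2-era trailing raise would itself crash the loop:

it = ListFiles().add_pattern('*.py', root='src') \
                .add_filter_by_extension('py')
for path in it.next(dryrun=True):  # dry run: list what would be processed
    print(path)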