Column             Type     Value range / length
id                 int32    0-252k
repo               string   7-55 chars
path               string   4-127 chars
func_name          string   1-88 chars
original_string    string   75-19.8k chars
language           string   1 distinct value (python)
code               string   75-19.8k chars
code_tokens        list
docstring          string   3-17.3k chars
docstring_tokens   list
sha                string   40 chars (fixed)
url                string   87-242 chars
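To make the schema concrete, here is a minimal sketch of iterating over the records in Python. It assumes the corpus is stored as JSON Lines with one record per line; the file name is hypothetical.

import json

# Minimal sketch, assuming a JSON Lines export with one record per line.
# The file name below is hypothetical, not part of the dataset.
with open("code_search_python.jsonl") as handle:
    for line in handle:
        record = json.loads(line)
        print(record["id"], record["repo"], record["func_name"])
        # Each record carries the raw source plus pre-tokenized views:
        assert isinstance(record["code_tokens"], list)
        assert isinstance(record["docstring_tokens"], list)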
243,700
fakedrake/overlay_parse
overlay_parse/util.py
rx_int_extra
def rx_int_extra(rxmatch):
    """ We didn't just match an int but the int is what we need. """
    rxmatch = re.search("\d+", rxmatch.group(0))
    return int(rxmatch.group(0))
python
[ "def", "rx_int_extra", "(", "rxmatch", ")", ":", "rxmatch", "=", "re", ".", "search", "(", "\"\\d+\"", ",", "rxmatch", ".", "group", "(", "0", ")", ")", "return", "int", "(", "rxmatch", ".", "group", "(", "0", ")", ")" ]
We didn't just match an int but the int is what we need.
[ "We", "didn", "t", "just", "match", "an", "int", "but", "the", "int", "is", "what", "we", "need", "." ]
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/util.py#L42-L48
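A brief usage sketch, inferred from the code above: rx_int_extra takes a regex match object whose matched text contains an integer and returns just that integer.

import re

# The outer pattern matches more than the integer;
# rx_int_extra digs the integer out of the match.
match = re.search(r"page \d+", "see page 42 for details")
print(rx_int_extra(match))  # -> 42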
243,701
lambdalisue/django-roughpages
src/roughpages/backends/decorators.py
prepare_filename_decorator
def prepare_filename_decorator(fn):
    """
    A decorator of `prepare_filename` method

    1. It automatically assign `settings.ROUGHPAGES_INDEX_FILENAME`
       if the `normalized_url` is ''.
    2. It automatically assign file extensions to the output list.
    """
    @wraps(fn)
    def inner(self, normalized_url, request):
        ext = settings.ROUGHPAGES_TEMPLATE_FILE_EXT
        if not normalized_url:
            normalized_url = settings.ROUGHPAGES_INDEX_FILENAME
        filenames = fn(self, normalized_url, request)
        filenames = [x + ext for x in filenames if x]
        return filenames
    return inner
python
[ "def", "prepare_filename_decorator", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "inner", "(", "self", ",", "normalized_url", ",", "request", ")", ":", "ext", "=", "settings", ".", "ROUGHPAGES_TEMPLATE_FILE_EXT", "if", "not", "normalized_url", ":", "normalized_url", "=", "settings", ".", "ROUGHPAGES_INDEX_FILENAME", "filenames", "=", "fn", "(", "self", ",", "normalized_url", ",", "request", ")", "filenames", "=", "[", "x", "+", "ext", "for", "x", "in", "filenames", "if", "x", "]", "return", "filenames", "return", "inner" ]
A decorator of `prepare_filename` method 1. It automatically assign `settings.ROUGHPAGES_INDEX_FILENAME` if the `normalized_url` is ''. 2. It automatically assign file extensions to the output list.
[ "A", "decorator", "of", "prepare_filename", "method" ]
f6a2724ece729c5deced2c2546d172561ef785ec
https://github.com/lambdalisue/django-roughpages/blob/f6a2724ece729c5deced2c2546d172561ef785ec/src/roughpages/backends/decorators.py#L9-L25
243,702
praekelt/panya
panya/templatetags/panya_template_tags.py
smart_query_string
def smart_query_string(parser, token):
    """
    Outputs current GET query string with additions appended.

    Additions are provided in token pairs.
    """
    args = token.split_contents()
    additions = args[1:]

    addition_pairs = []
    while additions:
        addition_pairs.append(additions[0:2])
        additions = additions[2:]

    return SmartQueryStringNode(addition_pairs)
python
[ "def", "smart_query_string", "(", "parser", ",", "token", ")", ":", "args", "=", "token", ".", "split_contents", "(", ")", "additions", "=", "args", "[", "1", ":", "]", "addition_pairs", "=", "[", "]", "while", "additions", ":", "addition_pairs", ".", "append", "(", "additions", "[", "0", ":", "2", "]", ")", "additions", "=", "additions", "[", "2", ":", "]", "return", "SmartQueryStringNode", "(", "addition_pairs", ")" ]
Outputs current GET query string with additions appended. Additions are provided in token pairs.
[ "Outputs", "current", "GET", "query", "string", "with", "additions", "appended", ".", "Additions", "are", "provided", "in", "token", "pairs", "." ]
0fd621e15a7c11a2716a9554a2f820d6259818e5
https://github.com/praekelt/panya/blob/0fd621e15a7c11a2716a9554a2f820d6259818e5/panya/templatetags/panya_template_tags.py#L12-L25
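For context, a hedged sketch of how such a tag is wired up; the registration and template usage below follow standard Django conventions and are assumptions, not taken from the panya source.

from django import template

register = template.Library()
# Assumed registration, following the usual Django tag convention:
register.tag('smart_query_string', smart_query_string)

# In a template, additions are passed as token pairs (name, value):
#   {% smart_query_string 'page' 2 'sort' 'title' %}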
243,703
Zaeb0s/max-threads
maxthreads/maxthreads.py
MaxThreads.start_thread
def start_thread(self, target, args=(), kwargs=None, priority=0):
    """ To make sure applications work with the old name """
    return self.add_task(target, args, kwargs, priority)
python
[ "def", "start_thread", "(", "self", ",", "target", ",", "args", "=", "(", ")", ",", "kwargs", "=", "None", ",", "priority", "=", "0", ")", ":", "return", "self", ".", "add_task", "(", "target", ",", "args", ",", "kwargs", ",", "priority", ")" ]
To make sure applications work with the old name
[ "To", "make", "sure", "applications", "work", "with", "the", "old", "name" ]
dce4ae784aa1c07fdb910359c0099907047403f9
https://github.com/Zaeb0s/max-threads/blob/dce4ae784aa1c07fdb910359c0099907047403f9/maxthreads/maxthreads.py#L143-L146
243,704
dossier/dossier.models
dossier/models/query.py
list_projects
def list_projects(folders, folder=None, user=None):
    '''List all folders or all subfolders of a folder.

    If folder is provided, this method will output a list of subfolders
    contained by it. Otherwise, a list of all top-level folders is
    produced.

    :param folders: reference to folder.Folders instance
    :param folder: folder name or None
    :param user: optional user name
    '''
    fid = None if folder is None else Folders.name_to_id(folder)

    # List all folders if none provided.
    if fid is None:
        for f in folders.folders(user):
            print(Folders.id_to_name(f))
        return

    # List subfolders of a specific folder
    try:
        for sid in folders.subfolders(fid, user):
            print(Folders.id_to_name(sid))
    except KeyError:
        print("E: folder not found: %s" % folder, file=sys.stderr)
python
[ "def", "list_projects", "(", "folders", ",", "folder", "=", "None", ",", "user", "=", "None", ")", ":", "fid", "=", "None", "if", "folder", "is", "None", "else", "Folders", ".", "name_to_id", "(", "folder", ")", "# List all folders if none provided.", "if", "fid", "is", "None", ":", "for", "f", "in", "folders", ".", "folders", "(", "user", ")", ":", "print", "(", "Folders", ".", "id_to_name", "(", "f", ")", ")", "return", "# List subfolders of a specific folder", "try", ":", "for", "sid", "in", "folders", ".", "subfolders", "(", "fid", ",", "user", ")", ":", "print", "(", "Folders", ".", "id_to_name", "(", "sid", ")", ")", "except", "KeyError", ":", "print", "(", "\"E: folder not found: %s\"", "%", "folder", ",", "file", "=", "sys", ".", "stderr", ")" ]
List all folders or all subfolders of a folder. If folder is provided, this method will output a list of subfolders contained by it. Otherwise, a list of all top-level folders is produced. :param folders: reference to folder.Folders instance :param folder: folder name or None :param user: optional user name
[ "List", "all", "folders", "or", "all", "subfolders", "of", "a", "folder", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/query.py#L39-L64
243,705
slarse/clanimtk
clanimtk/util.py
get_supervisor
def get_supervisor(func: types.AnyFunction) -> types.Supervisor:
    """Get the appropriate supervisor to use and pre-apply the function.

    Args:
        func: A function.
    """
    if not callable(func):
        raise TypeError("func is not callable")
    if asyncio.iscoroutinefunction(func):
        supervisor = _async_supervisor
    else:
        supervisor = _sync_supervisor
    return functools.partial(supervisor, func)
python
[ "def", "get_supervisor", "(", "func", ":", "types", ".", "AnyFunction", ")", "->", "types", ".", "Supervisor", ":", "if", "not", "callable", "(", "func", ")", ":", "raise", "TypeError", "(", "\"func is not callable\"", ")", "if", "asyncio", ".", "iscoroutinefunction", "(", "func", ")", ":", "supervisor", "=", "_async_supervisor", "else", ":", "supervisor", "=", "_sync_supervisor", "return", "functools", ".", "partial", "(", "supervisor", ",", "func", ")" ]
Get the appropriate supervisor to use and pre-apply the function. Args: func: A function.
[ "Get", "the", "appropriate", "supervisor", "to", "use", "and", "pre", "-", "apply", "the", "function", "." ]
cb93d2e914c3ecc4e0007745ff4d546318cf3902
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/util.py#L23-L35
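A small sketch of what get_supervisor returns, based only on the code above: a functools.partial over the supervisor chosen by whether func is a coroutine function.

import asyncio

async def fetch_data():
    await asyncio.sleep(0.1)
    return "done"

def crunch_numbers():
    return 42

# Coroutine functions get the async supervisor, plain callables the sync
# one; either way the function comes back partially applied.
async_sup = get_supervisor(fetch_data)     # partial(_async_supervisor, fetch_data)
sync_sup = get_supervisor(crunch_numbers)  # partial(_sync_supervisor, crunch_numbers)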
243,706
slarse/clanimtk
clanimtk/util.py
_async_supervisor
async def _async_supervisor(func, animation_, step, *args, **kwargs):
    """Supervisor for running an animation with an asynchronous function.

    Args:
        func: A function to be run alongside an animation.
        animation_: An infinite generator that produces
            strings for the animation.
        step: Seconds between each animation frame.
        *args: Arguments for func.
        **kwargs: Keyword arguments for func.

    Returns:
        The result of func(*args, **kwargs)

    Raises:
        Any exception that is thrown when executing func.
    """
    with ThreadPoolExecutor(max_workers=2) as pool:
        with _terminating_event() as event:
            pool.submit(animate_cli, animation_, step, event)
            result = await func(*args, **kwargs)
    return result
python
[ "async", "def", "_async_supervisor", "(", "func", ",", "animation_", ",", "step", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "ThreadPoolExecutor", "(", "max_workers", "=", "2", ")", "as", "pool", ":", "with", "_terminating_event", "(", ")", "as", "event", ":", "pool", ".", "submit", "(", "animate_cli", ",", "animation_", ",", "step", ",", "event", ")", "result", "=", "await", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result" ]
Supervisor for running an animation with an asynchronous function. Args: func: A function to be run alongside an animation. animation_: An infinite generator that produces strings for the animation. step: Seconds between each animation frame. *args: Arguments for func. **kwargs: Keyword arguments for func. Returns: The result of func(*args, **kwargs) Raises: Any exception that is thrown when executing func.
[ "Supervisor", "for", "running", "an", "animation", "with", "an", "asynchronous", "function", "." ]
cb93d2e914c3ecc4e0007745ff4d546318cf3902
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/util.py#L52-L71
243,707
slarse/clanimtk
clanimtk/util.py
concatechain
def concatechain(*generators: types.FrameGenerator, separator: str = ''):
    """Return a generator that in each iteration takes one value from
    each of the supplied generators, joins them together with the
    specified separator and yields the result. Stops as soon as any
    iterator raises StopIteration and returns the value contained in it.

    Primarily created for chaining string generators, hence the name.

    Args:
        generators: Any number of generators that yield types that can
            be joined together with the separator string.
        separator: A separator to insert between each value yielded by
            the different generators.

    Returns:
        A generator that yields strings that are the concatenation of
        one value from each of the generators, joined together with
        the separator string.
    """
    while True:
        try:
            next_ = [next(gen) for gen in generators]
            yield separator.join(next_)
        except StopIteration as exc:
            return exc.value
python
[ "def", "concatechain", "(", "*", "generators", ":", "types", ".", "FrameGenerator", ",", "separator", ":", "str", "=", "''", ")", ":", "while", "True", ":", "try", ":", "next_", "=", "[", "next", "(", "gen", ")", "for", "gen", "in", "generators", "]", "yield", "separator", ".", "join", "(", "next_", ")", "except", "StopIteration", "as", "exc", ":", "return", "exc", ".", "value" ]
Return a generator that in each iteration takes one value from each of the supplied generators, joins them together with the specified separator and yields the result. Stops as soon as any iterator raises StopIteration and returns the value contained in it. Primarily created for chaining string generators, hence the name. Args: generators: Any number of generators that yield types that can be joined together with the separator string. separator: A separator to insert between each value yielded by the different generators. Returns: A generator that yields strings that are the concatenation of one value from each of the generators, joined together with the separator string.
[ "Return", "a", "generator", "that", "in", "each", "iteration", "takes", "one", "value", "from", "each", "of", "the", "supplied", "generators", "joins", "them", "together", "with", "the", "specified", "separator", "and", "yields", "the", "result", ".", "Stops", "as", "soon", "as", "any", "iterator", "raises", "StopIteration", "and", "returns", "the", "value", "contained", "in", "it", "." ]
cb93d2e914c3ecc4e0007745ff4d546318cf3902
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/util.py#L94-L116
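A usage sketch built directly from the docstring: two infinite string generators zipped frame by frame with a separator.

def spinner():
    while True:
        for char in "|/-\\":
            yield char

def label():
    while True:
        yield "[working]"

frames = concatechain(spinner(), label(), separator=" ")
print(next(frames))  # "| [working]"
print(next(frames))  # "/ [working]"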
243,708
staticshock/scientist.py
scientist/experiment.py
Experiment.compare
def compare(self, control_result, experimental_result):
    """
    Return true if the results match.
    """
    _compare = getattr(self, '_compare', lambda x, y: x == y)
    return (
        # Mismatch if only one of the results returned an error, or if
        # different types of errors were returned.
        type(control_result.error) is type(experimental_result.error)
        and _compare(control_result.value, experimental_result.value)
    )
python
[ "def", "compare", "(", "self", ",", "control_result", ",", "experimental_result", ")", ":", "_compare", "=", "getattr", "(", "self", ",", "'_compare'", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "return", "(", "# Mismatch if only one of the results returned an error, or if", "# different types of errors were returned.", "type", "(", "control_result", ".", "error", ")", "is", "type", "(", "experimental_result", ".", "error", ")", "and", "_compare", "(", "control_result", ".", "value", ",", "experimental_result", ".", "value", ")", ")" ]
Return true if the results match.
[ "Return", "true", "if", "the", "results", "match", "." ]
68ec0b57989d1f78614aa3b17ce196e2b53cfe25
https://github.com/staticshock/scientist.py/blob/68ec0b57989d1f78614aa3b17ce196e2b53cfe25/scientist/experiment.py#L104-L114
243,709
bwesterb/tkbd
src/ruuster.py
Ruuster.fetch_inst_id
def fetch_inst_id(self):
    """ Fetches the institute id of the RU """
    try:
        for d in msgpack.unpack(urllib2.urlopen(
                "%s/list/institutes?format=msgpack" % self.url)):
            if d['name'] == 'Radboud Universiteit Nijmegen':
                return d['id']
    except IOError, e:  # urllib2 exceptions are a subclass of IOError
        raise RuusterError(e)
    assert False
python
[ "def", "fetch_inst_id", "(", "self", ")", ":", "try", ":", "for", "d", "in", "msgpack", ".", "unpack", "(", "urllib2", ".", "urlopen", "(", "\"%s/list/institutes?format=msgpack\"", "%", "self", ".", "url", ")", ")", ":", "if", "d", "[", "'name'", "]", "==", "'Radboud Universiteit Nijmegen'", ":", "return", "d", "[", "'id'", "]", "except", "IOError", ",", "e", ":", "# urllib2 exceptions are a subclass of IOError", "raise", "RuusterError", "(", "e", ")", "assert", "False" ]
Fetches the institute id of the RU
[ "Fetches", "the", "institute", "id", "of", "the", "RU" ]
fcf16977d38a93fe9b7fa198513007ab9921b650
https://github.com/bwesterb/tkbd/blob/fcf16977d38a93fe9b7fa198513007ab9921b650/src/ruuster.py#L31-L40
243,710
collectiveacuity/labPack
labpack/records/ip.py
get_ip
def get_ip(source='aws'):
    ''' a method to get current public ip address of machine '''
    if source == 'aws':
        source_url = 'http://checkip.amazonaws.com/'
    else:
        raise Exception('get_ip currently only supports queries to aws')
    import requests
    try:
        response = requests.get(url=source_url)
    except Exception as err:
        from labpack.handlers.requests import handle_requests
        from requests import Request
        request_object = Request(method='GET', url=source_url)
        request_details = handle_requests(request_object)
        raise Exception(request_details['error'])
    current_ip = response.content.decode()
    current_ip = current_ip.strip()
    return current_ip
python
[ "def", "get_ip", "(", "source", "=", "'aws'", ")", ":", "if", "source", "==", "'aws'", ":", "source_url", "=", "'http://checkip.amazonaws.com/'", "else", ":", "raise", "Exception", "(", "'get_ip currently only supports queries to aws'", ")", "import", "requests", "try", ":", "response", "=", "requests", ".", "get", "(", "url", "=", "source_url", ")", "except", "Exception", "as", "err", ":", "from", "labpack", ".", "handlers", ".", "requests", "import", "handle_requests", "from", "requests", "import", "Request", "request_object", "=", "Request", "(", "method", "=", "'GET'", ",", "url", "=", "source_url", ")", "request_details", "=", "handle_requests", "(", "request_object", ")", "raise", "Exception", "(", "request_details", "[", "'error'", "]", ")", "current_ip", "=", "response", ".", "content", ".", "decode", "(", ")", "current_ip", "=", "current_ip", ".", "strip", "(", ")", "return", "current_ip" ]
a method to get current public ip address of machine
[ "a", "method", "to", "get", "current", "public", "ip", "address", "of", "machine" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/records/ip.py#L5-L26
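A short usage sketch; only the default 'aws' source is supported.

print(get_ip())          # e.g. '203.0.113.7'
# get_ip(source='gcp')   # raises: get_ip currently only supports queries to aws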
243,711
rikrd/inspire
inspirespeech/htk.py
create_parameter
def create_parameter(samples, sample_period):
    """Create a HTK Parameter object from an array of samples and a samples period

    :param samples (list of lists or array of floats): The samples to write
        into the file. Usually feature vectors.
    :param sample_period (int): Sample period in 100ns units.
    """
    parm_kind_str = 'USER'
    parm_kind = _htk_str_to_param(parm_kind_str)
    parm_kind_base, parm_kind_opts = _htk_str_to_param(parm_kind_str)

    meta = ParameterMeta(n_samples=len(samples),
                         samp_period=sample_period,
                         samp_size=len(samples[0]) * 4,  # size in bytes
                         parm_kind_str=parm_kind_str,
                         parm_kind=parm_kind,
                         parm_kind_base=parm_kind_base,
                         parm_kind_opts=parm_kind_opts)

    return Parameter(meta=meta, samples=np.array(samples))
python
[ "def", "create_parameter", "(", "samples", ",", "sample_period", ")", ":", "parm_kind_str", "=", "'USER'", "parm_kind", "=", "_htk_str_to_param", "(", "parm_kind_str", ")", "parm_kind_base", ",", "parm_kind_opts", "=", "_htk_str_to_param", "(", "parm_kind_str", ")", "meta", "=", "ParameterMeta", "(", "n_samples", "=", "len", "(", "samples", ")", ",", "samp_period", "=", "sample_period", ",", "samp_size", "=", "len", "(", "samples", "[", "0", "]", ")", "*", "4", ",", "# size in bytes", "parm_kind_str", "=", "parm_kind_str", ",", "parm_kind", "=", "parm_kind", ",", "parm_kind_base", "=", "parm_kind_base", ",", "parm_kind_opts", "=", "parm_kind_opts", ")", "return", "Parameter", "(", "meta", "=", "meta", ",", "samples", "=", "np", ".", "array", "(", "samples", ")", ")" ]
Create a HTK Parameter object from an array of samples and a samples period :param samples (list of lists or array of floats): The samples to write into the file. Usually feature vectors. :param sample_period (int): Sample period in 100ns units.
[ "Create", "a", "HTK", "Parameter", "object", "from", "an", "array", "of", "samples", "and", "a", "samples", "period" ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk.py#L201-L220
243,712
rikrd/inspire
inspirespeech/htk.py
load_mlf
def load_mlf(filename, utf8_normalization=None):
    """Load an HTK Master Label File.

    :param filename: The filename of the MLF file.
    :param utf8_normalization: None
    """
    with codecs.open(filename, 'r', 'string_escape') as f:
        data = f.read().decode('utf8')
        if utf8_normalization:
            data = unicodedata.normalize(utf8_normalization, data)

    mlfs = {}
    for mlf_object in HTK_MLF_RE.finditer(data):
        mlfs[mlf_object.group('file')] = \
            [[Label(**mo.groupdict())
              for mo in HTK_HYPOTHESIS_RE.finditer(recognition_data)]
             for recognition_data in re.split(r'\n///\n',
                                              mlf_object.group('hypotheses'))]

    return mlfs
python
[ "def", "load_mlf", "(", "filename", ",", "utf8_normalization", "=", "None", ")", ":", "with", "codecs", ".", "open", "(", "filename", ",", "'r'", ",", "'string_escape'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", ".", "decode", "(", "'utf8'", ")", "if", "utf8_normalization", ":", "data", "=", "unicodedata", ".", "normalize", "(", "utf8_normalization", ",", "data", ")", "mlfs", "=", "{", "}", "for", "mlf_object", "in", "HTK_MLF_RE", ".", "finditer", "(", "data", ")", ":", "mlfs", "[", "mlf_object", ".", "group", "(", "'file'", ")", "]", "=", "[", "[", "Label", "(", "*", "*", "mo", ".", "groupdict", "(", ")", ")", "for", "mo", "in", "HTK_HYPOTHESIS_RE", ".", "finditer", "(", "recognition_data", ")", "]", "for", "recognition_data", "in", "re", ".", "split", "(", "r'\\n///\\n'", ",", "mlf_object", ".", "group", "(", "'hypotheses'", ")", ")", "]", "return", "mlfs" ]
Load an HTK Master Label File. :param filename: The filename of the MLF file. :param utf8_normalization: None
[ "Load", "an", "HTK", "Master", "Label", "File", "." ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk.py#L223-L242
243,713
rikrd/inspire
inspirespeech/htk.py
save_mlf
def save_mlf(mlf, output_filename):
    """Save an HTK Master Label File.

    :param mlf: MLF dictionary containing a mapping from file to list of annotations.
    :param output_filename: The file where to save the MLF
    """
    with codecs.open(output_filename, 'w', 'utf-8') as f:
        f.write(u'#!MLF!#\n')
        for k, v in mlf.items():
            f.write(u'"{}"\n'.format(k))
            for labels in v:
                for label in labels:
                    line = u'{start} {end} {symbol} ' \
                           u'{loglikelihood} {word}'.format(start=label.start or '',
                                                            end=label.end or '',
                                                            symbol=label.symbol or '',
                                                            loglikelihood=label.log_likelihood or '',
                                                            word=label.word or '')
                    f.write(u'{}\n'.format(line.strip()))
            f.write(u'.\n')
python
[ "def", "save_mlf", "(", "mlf", ",", "output_filename", ")", ":", "with", "codecs", ".", "open", "(", "output_filename", ",", "'w'", ",", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "u'#!MLF!#\\n'", ")", "for", "k", ",", "v", "in", "mlf", ".", "items", "(", ")", ":", "f", ".", "write", "(", "u'\"{}\"\\n'", ".", "format", "(", "k", ")", ")", "for", "labels", "in", "v", ":", "for", "label", "in", "labels", ":", "line", "=", "u'{start} {end} {symbol} '", "u'{loglikelihood} {word}'", ".", "format", "(", "start", "=", "label", ".", "start", "or", "''", ",", "end", "=", "label", ".", "end", "or", "''", ",", "symbol", "=", "label", ".", "symbol", "or", "''", ",", "loglikelihood", "=", "label", ".", "log_likelihood", "or", "''", ",", "word", "=", "label", ".", "word", "or", "''", ")", "f", ".", "write", "(", "u'{}\\n'", ".", "format", "(", "line", ".", "strip", "(", ")", ")", ")", "f", ".", "write", "(", "u'.\\n'", ")" ]
Save an HTK Master Label File. :param mlf: MLF dictionary containing a mapping from file to list of annotations. :param output_filename: The file where to save the MLF
[ "Save", "an", "HTK", "Master", "Label", "File", "." ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk.py#L245-L265
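To illustrate the MLF dict shape shared by load_mlf and save_mlf, a hedged sketch follows; the Label keyword arguments (start, end, symbol, log_likelihood, word) are inferred from the attribute accesses in save_mlf and may not match the real constructor.

# Hedged sketch: file -> list of hypotheses, each a list of Labels.
mlf = {
    '*/utterance_001.lab': [[
        Label(start='0', end='1200000', symbol='sil',
              log_likelihood=None, word=None),
        Label(start='1200000', end='3400000', symbol='ae',
              log_likelihood='-42.7', word='cat'),
    ]],
}
save_mlf(mlf, 'out.mlf')
round_trip = load_mlf('out.mlf')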
243,714
rikrd/inspire
inspirespeech/htk.py
main
def main():
    """Test code called from commandline"""
    model = load_model('../data/hmmdefs')

    hmm = model.hmms['r-We']
    for state_name in hmm.state_names:
        print(state_name)
        state = model.states[state_name]
        print(state.means_)

    print(model)

    model2 = load_model('../data/prior.hmm1mixSI.rate32')
    print(model2)
python
[ "def", "main", "(", ")", ":", "model", "=", "load_model", "(", "'../data/hmmdefs'", ")", "hmm", "=", "model", ".", "hmms", "[", "'r-We'", "]", "for", "state_name", "in", "hmm", ".", "state_names", ":", "print", "(", "state_name", ")", "state", "=", "model", ".", "states", "[", "state_name", "]", "print", "(", "state", ".", "means_", ")", "print", "(", "model", ")", "model2", "=", "load_model", "(", "'../data/prior.hmm1mixSI.rate32'", ")", "print", "(", "model2", ")" ]
Test code called from commandline
[ "Test", "code", "called", "from", "commandline" ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk.py#L283-L293
243,715
sassoo/goldman
goldman/models/base.py
Model.to_lower
def to_lower(cls):  # NOQA
    """ Return a list of all the fields that should be lowercased

    This is done on fields with `lower=True`.
    """
    email = cls.get_fields_by_class(EmailType)
    lower = cls.get_fields_by_prop('lower', True) + email
    return list(set(email + lower))
python
[ "def", "to_lower", "(", "cls", ")", ":", "# NOQA", "email", "=", "cls", ".", "get_fields_by_class", "(", "EmailType", ")", "lower", "=", "cls", ".", "get_fields_by_prop", "(", "'lower'", ",", "True", ")", "+", "email", "return", "list", "(", "set", "(", "email", "+", "lower", ")", ")" ]
Return a list of all the fields that should be lowercased This is done on fields with `lower=True`.
[ "Return", "a", "list", "of", "all", "the", "fields", "that", "should", "be", "lowercased" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L84-L93
243,716
sassoo/goldman
goldman/models/base.py
Model.get_fields_by_class
def get_fields_by_class(cls, field_class):
    """ Return a list of field names matching a field class

    :param field_class: field class object
    :return: list
    """
    ret = []
    for key, val in getattr(cls, '_fields').items():
        if isinstance(val, field_class):
            ret.append(key)
    return ret
python
[ "def", "get_fields_by_class", "(", "cls", ",", "field_class", ")", ":", "ret", "=", "[", "]", "for", "key", ",", "val", "in", "getattr", "(", "cls", ",", "'_fields'", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "field_class", ")", ":", "ret", ".", "append", "(", "key", ")", "return", "ret" ]
Return a list of field names matching a field class :param field_class: field class object :return: list
[ "Return", "a", "list", "of", "field", "names", "matching", "a", "field", "class" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L108-L120
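A hypothetical sketch of the class-level field scan, assuming a schematics-style model where `_fields` maps field names to type instances; the User model below is invented for illustration.

from schematics.types import StringType

# Hypothetical model for illustration only; goldman models are built
# on schematics, where `_fields` maps field names to type instances.
class User(Model):
    email = EmailType()
    backup_email = EmailType()
    name = StringType()

User.get_fields_by_class(EmailType)  # -> ['email', 'backup_email']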
243,717
sassoo/goldman
goldman/models/base.py
Model.get_fields_with_prop
def get_fields_with_prop(cls, prop_key):
    """ Return a list of fields with a prop key defined

    Each list item will be a tuple of field name containing the
    prop key & the value of that prop key.

    :param prop_key: key name
    :return: list of tuples
    """
    ret = []
    for key, val in getattr(cls, '_fields').items():
        if hasattr(val, prop_key):
            ret.append((key, getattr(val, prop_key)))
    return ret
python
[ "def", "get_fields_with_prop", "(", "cls", ",", "prop_key", ")", ":", "ret", "=", "[", "]", "for", "key", ",", "val", "in", "getattr", "(", "cls", ",", "'_fields'", ")", ".", "items", "(", ")", ":", "if", "hasattr", "(", "val", ",", "prop_key", ")", ":", "ret", ".", "append", "(", "(", "key", ",", "getattr", "(", "val", ",", "prop_key", ")", ")", ")", "return", "ret" ]
Return a list of fields with a prop key defined Each list item will be a tuple of field name containing the prop key & the value of that prop key. :param prop_key: key name :return: list of tuples
[ "Return", "a", "list", "of", "fields", "with", "a", "prop", "key", "defined" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L139-L154
243,718
sassoo/goldman
goldman/models/base.py
Model.to_exceptions
def to_exceptions(cls, errors):
    """ Convert the validation errors into ValidationFailure exc's

    Transform native schematics validation errors into a
    goldman ValidationFailure exception.

    :param errors: dict of errors in schematics format
    :return: list of ValidationFailure exception objects
    """
    ret = []
    for key, val in errors.items():
        if key in cls.relationships:
            attr = '/data/relationships/%s' % key
        else:
            attr = '/data/attributes/%s' % key
        for error in val:
            ret.append(ValidationFailure(attr, detail=error))
    return ret
python
[ "def", "to_exceptions", "(", "cls", ",", "errors", ")", ":", "ret", "=", "[", "]", "for", "key", ",", "val", "in", "errors", ".", "items", "(", ")", ":", "if", "key", "in", "cls", ".", "relationships", ":", "attr", "=", "'/data/relationships/%s'", "%", "key", "else", ":", "attr", "=", "'/data/attributes/%s'", "%", "key", "for", "error", "in", "val", ":", "ret", ".", "append", "(", "ValidationFailure", "(", "attr", ",", "detail", "=", "error", ")", ")", "return", "ret" ]
Convert the validation errors into ValidationFailure exc's Transform native schematics validation errors into a goldman ValidationFailure exception. :param errors: dict of errors in schematics format :return: list of ValidationFailure exception objects
[ "Convert", "the", "validation", "errors", "into", "ValidationFailure", "exc", "s" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L157-L180
243,719
sassoo/goldman
goldman/models/base.py
Model.dirty_fields
def dirty_fields(self):
    """ Return an array of field names that are dirty

    Dirty means if a model was hydrated first from the store &
    then had field values changed they are now considered dirty.

    For new models all fields are considered dirty.

    :return: list
    """
    dirty_fields = []
    for field in self.all_fields:
        if field not in self._original:
            dirty_fields.append(field)
        elif self._original[field] != getattr(self, field):
            dirty_fields.append(field)
    return dirty_fields
python
[ "def", "dirty_fields", "(", "self", ")", ":", "dirty_fields", "=", "[", "]", "for", "field", "in", "self", ".", "all_fields", ":", "if", "field", "not", "in", "self", ".", "_original", ":", "dirty_fields", ".", "append", "(", "field", ")", "elif", "self", ".", "_original", "[", "field", "]", "!=", "getattr", "(", "self", ",", "field", ")", ":", "dirty_fields", ".", "append", "(", "field", ")", "return", "dirty_fields" ]
Return an array of field names that are dirty Dirty means if a model was hydrated first from the store & then had field values changed they are now considered dirty. For new models all fields are considered dirty. :return: list
[ "Return", "an", "array", "of", "field", "names", "that", "are", "dirty" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L189-L208
243,720
sassoo/goldman
goldman/models/base.py
Model.merge
def merge(self, data, clean=False, validate=False):
    """ Merge a dict with the model

    This is needed because schematics doesn't auto cast values
    when assigned. This method allows us to ensure incoming
    data & existing data on a model are always coerced properly.

    We create a temporary model instance with just the new data
    so all the features of schematics deserialization are still
    available.

    :param data: dict of potentially new different data to merge
    :param clean: set the dirty bit back to clean. This is useful
        when the merge is coming from the store where the data
        could have been mutated & the new merged in data is now
        the single source of truth.
    :param validate: run the schematics validate method
    :return: nothing.. it has mutation side effects
    """
    try:
        model = self.__class__(data)
    except ConversionError as errors:
        abort(self.to_exceptions(errors.messages))

    for key, val in model.to_native().items():
        if key in data:
            setattr(self, key, val)

    if validate:
        try:
            self.validate()
        except ModelValidationError as errors:
            abort(self.to_exceptions(errors.messages))

    if clean:
        self._original = self.to_native()
python
[ "def", "merge", "(", "self", ",", "data", ",", "clean", "=", "False", ",", "validate", "=", "False", ")", ":", "try", ":", "model", "=", "self", ".", "__class__", "(", "data", ")", "except", "ConversionError", "as", "errors", ":", "abort", "(", "self", ".", "to_exceptions", "(", "errors", ".", "messages", ")", ")", "for", "key", ",", "val", "in", "model", ".", "to_native", "(", ")", ".", "items", "(", ")", ":", "if", "key", "in", "data", ":", "setattr", "(", "self", ",", "key", ",", "val", ")", "if", "validate", ":", "try", ":", "self", ".", "validate", "(", ")", "except", "ModelValidationError", "as", "errors", ":", "abort", "(", "self", ".", "to_exceptions", "(", "errors", ".", "messages", ")", ")", "if", "clean", ":", "self", ".", "_original", "=", "self", ".", "to_native", "(", ")" ]
Merge a dict with the model This is needed because schematics doesn't auto cast values when assigned. This method allows us to ensure incoming data & existing data on a model are always coerced properly. We create a temporary model instance with just the new data so all the features of schematics deserialization are still available. :param data: dict of potentially new different data to merge :param clean: set the dirty bit back to clean. This is useful when the merge is coming from the store where the data could have been mutated & the new merged in data is now the single source of truth. :param validate: run the schematics validate method :return: nothing.. it has mutation side effects
[ "Merge", "a", "dict", "with", "the", "model" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L222-L266
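A short, hedged sketch of the merge contract described above; the user instance and field are hypothetical.

# Coerce incoming request data onto the model, validate it, and mark
# the result clean (store-authoritative).
user.merge({'email': 'ADMIN@EXAMPLE.COM'}, validate=True, clean=True)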
243,721
sassoo/goldman
goldman/models/base.py
Model.to_primitive
def to_primitive(self, load_rels=None, sparse_fields=None, *args, **kwargs):
    """ Override the schematics native to_primitive method

    :param load_rels: List of field names that are relationships
        that should be loaded for the serialization process. This
        needs to be run before the native schematics to_primitive
        is run so the proper data is serialized.
    :param sparse_fields: List of field names that can be provided
        which limits the serialization to ONLY those field names.
        A whitelist effectively.
    """
    if load_rels:
        for rel in load_rels:
            getattr(self, rel).load()

    data = super(Model, self).to_primitive(*args, **kwargs)

    if sparse_fields:
        for key in data.keys():
            if key not in sparse_fields:
                del data[key]

    return data
python
[ "def", "to_primitive", "(", "self", ",", "load_rels", "=", "None", ",", "sparse_fields", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "load_rels", ":", "for", "rel", "in", "load_rels", ":", "getattr", "(", "self", ",", "rel", ")", ".", "load", "(", ")", "data", "=", "super", "(", "Model", ",", "self", ")", ".", "to_primitive", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "sparse_fields", ":", "for", "key", "in", "data", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "sparse_fields", ":", "del", "data", "[", "key", "]", "return", "data" ]
Override the schematics native to_primitive method :param load_rels: List of field names that are relationships that should be loaded for the serialization process. This needs to be run before the native schematics to_primitive is run so the proper data is serialized. :param sparse_fields: List of field names that can be provided which limits the serialization to ONLY those field names. A whitelist effectively.
[ "Override", "the", "schematics", "native", "to_primitive", "method" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L268-L295
243,722
rosenbrockc/acorn
acorn/analyze/sklearn.py
stash_split
def stash_split(fqdn, result, *argl, **argd):
    """Stashes the split between training and testing sets so that it can be
    used later for automatic scoring of the models in the log.
    """
    global _splits
    if fqdn == "sklearn.cross_validation.train_test_split":
        key = id(result[1])
        _splits[key] = result

    # We don't actually want to return anything for the analysis; we are
    # using it as a hook to save pointers to the dataset split so that we
    # can easily analyze performance later on.
    return None
python
[ "def", "stash_split", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")", ":", "global", "_splits", "if", "fqdn", "==", "\"sklearn.cross_validation.train_test_split\"", ":", "key", "=", "id", "(", "result", "[", "1", "]", ")", "_splits", "[", "key", "]", "=", "result", "#We don't actually want to return anything for the analysis; we are using it", "#as a hook to save pointers to the dataset split so that we can easily", "#analyze performance later on.", "return", "None" ]
Stashes the split between training and testing sets so that it can be used later for automatic scoring of the models in the log.
[ "Stashes", "the", "split", "between", "training", "and", "testing", "sets", "so", "that", "it", "can", "be", "used", "later", "for", "automatic", "scoring", "of", "the", "models", "in", "the", "log", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L57-L69
243,723
rosenbrockc/acorn
acorn/analyze/sklearn.py
_machine_fqdn
def _machine_fqdn(machine):
    """Returns the FQDN of the given learning machine.
    """
    from acorn.logging.decoration import _fqdn
    if hasattr(machine, "__class__"):
        return _fqdn(machine.__class__, False)
    else:  # pragma: no cover
        # See what FQDN can get out of the class instance.
        return _fqdn(machine)
python
[ "def", "_machine_fqdn", "(", "machine", ")", ":", "from", "acorn", ".", "logging", ".", "decoration", "import", "_fqdn", "if", "hasattr", "(", "machine", ",", "\"__class__\"", ")", ":", "return", "_fqdn", "(", "machine", ".", "__class__", ",", "False", ")", "else", ":", "# pragma: no cover", "#See what FQDN can get out of the class instance.", "return", "_fqdn", "(", "machine", ")" ]
Returns the FQDN of the given learning machine.
[ "Returns", "the", "FQDN", "of", "the", "given", "learning", "machine", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L71-L79
243,724
rosenbrockc/acorn
acorn/analyze/sklearn.py
fit
def fit(fqdn, result, *argl, **argd):
    """Analyzes the result of a generic fit operation performed by `sklearn`.

    Args:
        fqdn (str): full-qualified name of the method that was called.
        result: result of calling the method with `fqdn`.
        argl (tuple): positional arguments passed to the method call.
        argd (dict): keyword arguments passed to the method call.
    """
    # Check the arguments to see what kind of data we are working with, then
    # choose the appropriate function below to return the analysis dictionary.
    # The first positional argument will be the instance of the machine that
    # was used. Check its name against a list.
    global _machines
    out = None
    if len(argl) > 0:
        machine = argl[0]
        # We save pointers to the machine that was just fit so that we can
        # figure out later what training data was used for analysis purposes.
        key = id(machine)
        _machines[key] = (machine, argl[0], argl[1])

        if isclassifier(machine):
            out = classify_fit(fqdn, result, *argl, **argd)
        elif isregressor(machine):
            out = regress_fit(fqdn, result, *argl, **argd)

    return out
python
[ "def", "fit", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")", ":", "#Check the arguments to see what kind of data we are working with, then", "#choose the appropriate function below to return the analysis dictionary.", "#The first positional argument will be the instance of the machine that was", "#used. Check its name against a list.", "global", "_machines", "out", "=", "None", "if", "len", "(", "argl", ")", ">", "0", ":", "machine", "=", "argl", "[", "0", "]", "#We save pointers to the machine that was just fit so that we can figure", "#out later what training data was used for analysis purposes.", "key", "=", "id", "(", "machine", ")", "_machines", "[", "key", "]", "=", "(", "machine", ",", "argl", "[", "0", "]", ",", "argl", "[", "1", "]", ")", "if", "isclassifier", "(", "machine", ")", ":", "out", "=", "classify_fit", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")", "elif", "isregressor", "(", "machine", ")", ":", "out", "=", "regress_fit", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")", "return", "out" ]
Analyzes the result of a generic fit operation performed by `sklearn`. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call.
[ "Analyzes", "the", "result", "of", "a", "generic", "fit", "operation", "performed", "by", "sklearn", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L95-L122
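A hedged sketch of how this analysis hook would be invoked: acorn's decoration layer (assumed) passes the FQDN, the call's result, and the original arguments, with argl[0] the estimator and argl[1] its training matrix.

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

machine = LogisticRegression()
result = machine.fit(X, y)  # sklearn's fit returns the estimator itself

# Sketch of the hook call the decoration layer would make (assumed):
analysis = fit("sklearn.linear_model.LogisticRegression.fit",
               result, machine, X, y)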
243,725
rosenbrockc/acorn
acorn/analyze/sklearn.py
predict
def predict(fqdn, result, *argl, **argd):
    """Analyzes the result of a generic predict operation performed by
    `sklearn`.

    Args:
        fqdn (str): full-qualified name of the method that was called.
        result: result of calling the method with `fqdn`.
        argl (tuple): positional arguments passed to the method call.
        argd (dict): keyword arguments passed to the method call.
    """
    # Check the arguments to see what kind of data we are working with, then
    # choose the appropriate function below to return the analysis dictionary.
    out = None
    if len(argl) > 0:
        machine = argl[0]
        if isclassifier(machine):
            out = classify_predict(fqdn, result, None, *argl, **argd)
        elif isregressor(machine):
            out = regress_predict(fqdn, result, None, *argl, **argd)
    return out
python
[ "def", "predict", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")", ":", "#Check the arguments to see what kind of data we are working with, then", "#choose the appropriate function below to return the analysis dictionary.", "out", "=", "None", "if", "len", "(", "argl", ")", ">", "0", ":", "machine", "=", "argl", "[", "0", "]", "if", "isclassifier", "(", "machine", ")", ":", "out", "=", "classify_predict", "(", "fqdn", ",", "result", ",", "None", ",", "*", "argl", ",", "*", "*", "argd", ")", "elif", "isregressor", "(", "machine", ")", ":", "out", "=", "regress_predict", "(", "fqdn", ",", "result", ",", "None", ",", "*", "argl", ",", "*", "*", "argd", ")", "return", "out" ]
Analyzes the result of a generic predict operation performed by `sklearn`. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call.
[ "Analyzes", "the", "result", "of", "a", "generic", "predict", "operation", "performed", "by", "sklearn", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L124-L143
243,726
rosenbrockc/acorn
acorn/analyze/sklearn.py
_do_auto_predict
def _do_auto_predict(machine, X, *args):
    """Performs an automatic prediction for the specified machine and returns
    the predicted values.
    """
    if auto_predict and hasattr(machine, "predict"):
        return machine.predict(X)
python
[ "def", "_do_auto_predict", "(", "machine", ",", "X", ",", "*", "args", ")", ":", "if", "auto_predict", "and", "hasattr", "(", "machine", ",", "\"predict\"", ")", ":", "return", "machine", ".", "predict", "(", "X", ")" ]
Performs an automatic prediction for the specified machine and returns the predicted values.
[ "Performs", "an", "automatic", "prediction", "for", "the", "specified", "machine", "and", "returns", "the", "predicted", "values", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L145-L150
243,727
rosenbrockc/acorn
acorn/analyze/sklearn.py
_generic_fit
def _generic_fit(fqdn, result, scorer, yP=None, *argl, **argd): """Performs the generic fit tests that are common to both classifier and regressor; uses `scorer` to score the predicted values given by the machine when tested against its training set. Args: scorer (function): called on the result of `machine.predict(Xtrain, ytrain)`. """ out = None if len(argl) > 0: machine = argl[0] out = {} if hasattr(machine, "best_score_"): out["score"] = machine.best_score_ #With fitting it is often useful to know how well the fitting set was #matched (by trying to predict a score on it). We can do this #automatically and show the result to the user. yL = _do_auto_predict(*argl[0:2]) yscore = scorer(fqdn, yL, yP, *argl, **argd) if yscore is not None: out.update(yscore) return out
python
def _generic_fit(fqdn, result, scorer, yP=None, *argl, **argd): """Performs the generic fit tests that are common to both classifier and regressor; uses `scorer` to score the predicted values given by the machine when tested against its training set. Args: scorer (function): called on the result of `machine.predict(Xtrain, ytrain)`. """ out = None if len(argl) > 0: machine = argl[0] out = {} if hasattr(machine, "best_score_"): out["score"] = machine.best_score_ #With fitting it is often useful to know how well the fitting set was #matched (by trying to predict a score on it). We can do this #automatically and show the result to the user. yL = _do_auto_predict(*argl[0:2]) yscore = scorer(fqdn, yL, yP, *argl, **argd) if yscore is not None: out.update(yscore) return out
[ "def", "_generic_fit", "(", "fqdn", ",", "result", ",", "scorer", ",", "yP", "=", "None", ",", "*", "argl", ",", "*", "*", "argd", ")", ":", "out", "=", "None", "if", "len", "(", "argl", ")", ">", "0", ":", "machine", "=", "argl", "[", "0", "]", "out", "=", "{", "}", "if", "hasattr", "(", "machine", ",", "\"best_score_\"", ")", ":", "out", "[", "\"score\"", "]", "=", "machine", ".", "best_score_", "#With fitting it is often useful to know how well the fitting set was", "#matched (by trying to predict a score on it). We can do this", "#automatically and show the result to the user.", "yL", "=", "_do_auto_predict", "(", "*", "argl", "[", "0", ":", "2", "]", ")", "yscore", "=", "scorer", "(", "fqdn", ",", "yL", ",", "yP", ",", "*", "argl", ",", "*", "*", "argd", ")", "if", "yscore", "is", "not", "None", ":", "out", ".", "update", "(", "yscore", ")", "return", "out" ]
Performs the generic fit tests that are common to both classifier and regressor; uses `scorer` to score the predicted values given by the machine when tested against its training set. Args: scorer (function): called on the result of `machine.predict(Xtrain, ytrain)`.
[ "Performs", "the", "generic", "fit", "tests", "that", "are", "common", "to", "both", "classifier", "and", "regressor", ";", "uses", "scorer", "to", "score", "the", "predicted", "values", "given", "by", "the", "machine", "when", "tested", "against", "its", "training", "set", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L152-L176
243,728
rosenbrockc/acorn
acorn/analyze/sklearn.py
_percent_match
def _percent_match(result, out, yP=None, *argl): """Returns the percent match for the specified prediction call; requires that the data was split before using an analyzed method. Args: out (dict): output dictionary to save the result to. """ if len(argl) > 1: if yP is None: Xt = argl[1] key = id(Xt) if key in _splits: yP = _splits[key][3] if yP is not None: import math out["%"] = round(1.-sum(abs(yP - result))/float(len(result)), 3)
python
def _percent_match(result, out, yP=None, *argl): """Returns the percent match for the specified prediction call; requires that the data was split before using an analyzed method. Args: out (dict): output dictionary to save the result to. """ if len(argl) > 1: if yP is None: Xt = argl[1] key = id(Xt) if key in _splits: yP = _splits[key][3] if yP is not None: import math out["%"] = round(1.-sum(abs(yP - result))/float(len(result)), 3)
[ "def", "_percent_match", "(", "result", ",", "out", ",", "yP", "=", "None", ",", "*", "argl", ")", ":", "if", "len", "(", "argl", ")", ">", "1", ":", "if", "yP", "is", "None", ":", "Xt", "=", "argl", "[", "1", "]", "key", "=", "id", "(", "Xt", ")", "if", "key", "in", "_splits", ":", "yP", "=", "_splits", "[", "key", "]", "[", "3", "]", "if", "yP", "is", "not", "None", ":", "import", "math", "out", "[", "\"%\"", "]", "=", "round", "(", "1.", "-", "sum", "(", "abs", "(", "yP", "-", "result", ")", ")", "/", "float", "(", "len", "(", "result", ")", ")", ",", "3", ")" ]
Returns the percent match for the specified prediction call; requires that the data was split before using an analyzed method. Args: out (dict): output dictionary to save the result to.
[ "Returns", "the", "percent", "match", "for", "the", "specified", "prediction", "call", ";", "requires", "that", "the", "data", "was", "split", "before", "using", "an", "analyzed", "method", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L188-L204
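A worked numeric check of the formula above, 1 - sum(|y_true - y_pred|)/n rounded to three places; for 0/1 class labels this is simply the fraction of matching predictions:

import numpy as np

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 1, 1, 0, 0])  # two mismatches out of five
out = {}
out["%"] = round(1. - sum(abs(y_true - y_pred)) / float(len(y_pred)), 3)
print(out)  # {'%': 0.6}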
243,729
basvandenbroek/gcloud_taskqueue
gcloud_taskqueue/task.py
Task.path
def path(self): """Getter property for the URL path to this Task. :rtype: string :returns: The URL path to this task. """ if not self.id: raise ValueError('Cannot determine path without a task id.') return self.path_helper(self.taskqueue.path, self.id)
python
def path(self): """Getter property for the URL path to this Task. :rtype: string :returns: The URL path to this task. """ if not self.id: raise ValueError('Cannot determine path without a task id.') return self.path_helper(self.taskqueue.path, self.id)
[ "def", "path", "(", "self", ")", ":", "if", "not", "self", ".", "id", ":", "raise", "ValueError", "(", "'Cannot determine path without a task id.'", ")", "return", "self", ".", "path_helper", "(", "self", ".", "taskqueue", ".", "path", ",", "self", ".", "id", ")" ]
Getter property for the URL path to this Task. :rtype: string :returns: The URL path to this task.
[ "Getter", "property", "for", "the", "URL", "path", "to", "this", "Task", "." ]
b147b57f7c0ad9e8030ee9797d6526a448aa5007
https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/task.py#L77-L86
243,730
basvandenbroek/gcloud_taskqueue
gcloud_taskqueue/task.py
Task.delete
def delete(self, client=None): """Deletes a task from Task Queue. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just deleted. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`). """ return self.taskqueue.delete_task(self.id, client=client)
python
def delete(self, client=None): """Deletes a task from Task Queue. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just deleted. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`). """ return self.taskqueue.delete_task(self.id, client=client)
[ "def", "delete", "(", "self", ",", "client", "=", "None", ")", ":", "return", "self", ".", "taskqueue", ".", "delete_task", "(", "self", ".", "id", ",", "client", "=", "client", ")" ]
Deletes a task from Task Queue. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just deleted. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`).
[ "Deletes", "a", "task", "from", "Task", "Queue", "." ]
b147b57f7c0ad9e8030ee9797d6526a448aa5007
https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/task.py#L93-L106
243,731
basvandenbroek/gcloud_taskqueue
gcloud_taskqueue/task.py
Task.update
def update(self, new_lease_time, client=None): """Update the duration of a task lease :type new_lease_time: int :param new_lease_time: the new lease time in seconds. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just updated. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.update_task`). """ return self.taskqueue.update_task(self.id, new_lease_time=new_lease_time, client=client)
python
def update(self, new_lease_time, client=None): """Update the duration of a task lease :type new_lease_time: int :param new_lease_time: the new lease time in seconds. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just updated. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.update_task`). """ return self.taskqueue.update_task(self.id, new_lease_time=new_lease_time, client=client)
[ "def", "update", "(", "self", ",", "new_lease_time", ",", "client", "=", "None", ")", ":", "return", "self", ".", "taskqueue", ".", "update_task", "(", "self", ".", "id", ",", "new_lease_time", "=", "new_lease_time", ",", "client", "=", "client", ")" ]
Update the duration of a task lease :type new_lease_time: int :param new_lease_time: the new lease time in seconds. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just updated. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.update_task`).
[ "Update", "the", "duration", "of", "a", "task", "lease" ]
b147b57f7c0ad9e8030ee9797d6526a448aa5007
https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/task.py#L108-L124
243,732
basvandenbroek/gcloud_taskqueue
gcloud_taskqueue/task.py
Task.description
def description(self): """The description for this task. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: string :returns: The description for this task. """ if self._description is None: if 'payloadBase64' not in self._properties: self._properties = self.taskqueue.get_task(id=self.id)._properties self._description = base64.b64decode(self._properties.get('payloadBase64', b'')).decode("ascii") return self._description
python
def description(self): """The description for this task. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: string :returns: The description for this task. """ if self._description is None: if 'payloadBase64' not in self._properties: self._properties = self.taskqueue.get_task(id=self.id)._properties self._description = base64.b64decode(self._properties.get('payloadBase64', b'')).decode("ascii") return self._description
[ "def", "description", "(", "self", ")", ":", "if", "self", ".", "_description", "is", "None", ":", "if", "'payloadBase64'", "not", "in", "self", ".", "_properties", ":", "self", ".", "_properties", "=", "self", ".", "taskqueue", ".", "get_task", "(", "id", "=", "self", ".", "id", ")", ".", "_properties", "self", ".", "_description", "=", "base64", ".", "b64decode", "(", "self", ".", "_properties", ".", "get", "(", "'payloadBase64'", ",", "b''", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "return", "self", ".", "_description" ]
The description for this task. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: string :returns: The description for this task.
[ "The", "description", "for", "this", "task", "." ]
b147b57f7c0ad9e8030ee9797d6526a448aa5007
https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/task.py#L162-L174
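A standalone illustration of the decoding step in `description`: the REST resource carries the payload base64-encoded under 'payloadBase64', and the property decodes it to ASCII text:

import base64

properties = {'payloadBase64': base64.b64encode(b'resize image 42').decode('ascii')}
payload = properties.get('payloadBase64', b'')
print(base64.b64decode(payload).decode('ascii'))  # resize image 42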
243,733
basvandenbroek/gcloud_taskqueue
gcloud_taskqueue/task.py
Task.time_enqueued
def time_enqueued(self): """Retrieve the timestamp at which the task was enqueued. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: :class:`datetime.datetime` or ``NoneType`` :returns: Datetime object parsed from microsecond timestamp, or ``None`` if the property is not set locally. """ value = self._properties.get('enqueueTimestamp') if value is not None: return _datetime_from_microseconds(int(value))
python
def time_enqueued(self): """Retrieve the timestamp at which the task was enqueued. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: :class:`datetime.datetime` or ``NoneType`` :returns: Datetime object parsed from microsecond timestamp, or ``None`` if the property is not set locally. """ value = self._properties.get('enqueueTimestamp') if value is not None: return _datetime_from_microseconds(int(value))
[ "def", "time_enqueued", "(", "self", ")", ":", "value", "=", "self", ".", "_properties", ".", "get", "(", "'enqueueTimestamp'", ")", "if", "value", "is", "not", "None", ":", "return", "_datetime_from_microseconds", "(", "int", "(", "value", ")", ")" ]
Retrieve the timestamp at which the task was enqueued. See: https://cloud.google.com/appengine/docs/python/taskqueue/rest/tasks :rtype: :class:`datetime.datetime` or ``NoneType`` :returns: Datetime object parsed from microsecond timestamp, or ``None`` if the property is not set locally.
[ "Retrieve", "the", "timestamp", "at", "which", "the", "task", "was", "enqueued", "." ]
b147b57f7c0ad9e8030ee9797d6526a448aa5007
https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/task.py#L177-L188
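`_datetime_from_microseconds` is imported from gcloud's helpers and is not shown in this record; a minimal equivalent (naive UTC here, whereas the real helper may attach tzinfo) makes the conversion concrete:

import datetime

def datetime_from_microseconds(value):
    # Epoch microseconds -> datetime, as used for 'enqueueTimestamp'.
    return datetime.datetime(1970, 1, 1) + datetime.timedelta(microseconds=value)

print(datetime_from_microseconds(1500000000 * 10**6))  # 2017-07-14 02:40:00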
243,734
stormy-ua/DeepLearningToy
src/pydeeptoy/computational_graph.py
ComputationalGraph.conv2d
def conv2d(self, x_in: Connection, w_in: Connection, receptive_field_size, filters_number, stride=1, padding=1, name=""): """ Computes a 2-D convolution given 4-D input and filter tensors. """ x_cols = self.tensor_3d_to_cols(x_in, receptive_field_size, stride=stride, padding=padding) mul = self.transpose(self.matrix_multiply(x_cols, w_in), 0, 2, 1) #output_width = self.sum(self.div(self.sum(self.sum(self.shape(x_in, 2), self.constant(-1 * receptive_field_size)), # self.constant(2 * padding)), self.constant(stride)), self.constant(1)) # output_height = (h - f + 2 * p) / s + 1 output = self.reshape(mul, (-1, filters_number, receptive_field_size, receptive_field_size)) output.name = name return output
python
def conv2d(self, x_in: Connection, w_in: Connection, receptive_field_size, filters_number, stride=1, padding=1, name=""): """ Computes a 2-D convolution given 4-D input and filter tensors. """ x_cols = self.tensor_3d_to_cols(x_in, receptive_field_size, stride=stride, padding=padding) mul = self.transpose(self.matrix_multiply(x_cols, w_in), 0, 2, 1) #output_width = self.sum(self.div(self.sum(self.sum(self.shape(x_in, 2), self.constant(-1 * receptive_field_size)), # self.constant(2 * padding)), self.constant(stride)), self.constant(1)) # output_height = (h - f + 2 * p) / s + 1 output = self.reshape(mul, (-1, filters_number, receptive_field_size, receptive_field_size)) output.name = name return output
[ "def", "conv2d", "(", "self", ",", "x_in", ":", "Connection", ",", "w_in", ":", "Connection", ",", "receptive_field_size", ",", "filters_number", ",", "stride", "=", "1", ",", "padding", "=", "1", ",", "name", "=", "\"\"", ")", ":", "x_cols", "=", "self", ".", "tensor_3d_to_cols", "(", "x_in", ",", "receptive_field_size", ",", "stride", "=", "stride", ",", "padding", "=", "padding", ")", "mul", "=", "self", ".", "transpose", "(", "self", ".", "matrix_multiply", "(", "x_cols", ",", "w_in", ")", ",", "0", ",", "2", ",", "1", ")", "#output_width = self.sum(self.div(self.sum(self.sum(self.shape(x_in, 2), self.constant(-1 * receptive_field_size)),", "# self.constant(2 * padding)), self.constant(stride)), self.constant(1))", "# output_height = (h - f + 2 * p) / s + 1", "output", "=", "self", ".", "reshape", "(", "mul", ",", "(", "-", "1", ",", "filters_number", ",", "receptive_field_size", ",", "receptive_field_size", ")", ")", "output", ".", "name", "=", "name", "return", "output" ]
Computes a 2-D convolution given 4-D input and filter tensors.
[ "Computes", "a", "2", "-", "D", "convolution", "given", "4", "-", "D", "input", "and", "filter", "tensors", "." ]
7ab0814ec0491a575ebee8eb1ccd61ce4bf9554b
https://github.com/stormy-ua/DeepLearningToy/blob/7ab0814ec0491a575ebee8eb1ccd61ce4bf9554b/src/pydeeptoy/computational_graph.py#L141-L156
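A worked instance of the output-size formula left in the comment above, output = (h - f + 2*p)/s + 1 for input size h, receptive field f, padding p, stride s:

def conv_output_size(h, f, p=1, s=1):
    return (h - f + 2 * p) // s + 1

print(conv_output_size(32, 3, p=1, s=1))  # 32: 3x3 with padding 1 preserves size
print(conv_output_size(32, 5, p=0, s=1))  # 28
print(conv_output_size(32, 3, p=1, s=2))  # 16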
243,735
beatmax/redmodel
redmodel/models/writer.py
SortedSetFieldWriter.append
def append(self, hcont, value, score = None): """ If sort_field is specified, score must be None. If sort_field is not specified, score is mandatory. """ assert (score is None) != (self.field.sort_field is None) if score is None: score = getattr(value, self.field.sort_field.name) ContainerFieldWriter.append(self, hcont, value, score)
python
def append(self, hcont, value, score = None): """ If sort_field is specified, score must be None. If sort_field is not specified, score is mandatory. """ assert (score is None) != (self.field.sort_field is None) if score is None: score = getattr(value, self.field.sort_field.name) ContainerFieldWriter.append(self, hcont, value, score)
[ "def", "append", "(", "self", ",", "hcont", ",", "value", ",", "score", "=", "None", ")", ":", "assert", "(", "score", "is", "None", ")", "!=", "(", "self", ".", "field", ".", "sort_field", "is", "None", ")", "if", "score", "is", "None", ":", "score", "=", "getattr", "(", "value", ",", "self", ".", "field", ".", "sort_field", ".", "name", ")", "ContainerFieldWriter", ".", "append", "(", "self", ",", "hcont", ",", "value", ",", "score", ")" ]
If sort_field is specified, score must be None. If sort_field is not specified, score is mandatory.
[ "If", "sort_field", "is", "specified", "score", "must", "be", "None", ".", "If", "sort_field", "is", "not", "specified", "score", "is", "mandatory", "." ]
1df545ecd8e36e8addeaac124db32d9d0e22938c
https://github.com/beatmax/redmodel/blob/1df545ecd8e36e8addeaac124db32d9d0e22938c/redmodel/models/writer.py#L203-L209
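The assert above is an exclusive-or on "is None": exactly one of `score` and the field's `sort_field` must be set. A self-contained check of that idiom:

def exactly_one(score, sort_field):
    return (score is None) != (sort_field is None)

print(exactly_one(None, 'points'))  # True: score derived from sort_field
print(exactly_one(10.0, None))      # True: explicit score
print(exactly_one(10.0, 'points'))  # False: the assertion would fail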
243,736
fogcitymarathoner/s3_mysql_backup
s3_mysql_backup/copy_file.py
copy_file
def copy_file(aws_access_key_id, aws_secret_access_key, bucket_name, file, s3_folder): """ copies file to bucket s3_folder """ # Connect to the bucket bucket = s3_bucket(aws_access_key_id, aws_secret_access_key, bucket_name) key = boto.s3.key.Key(bucket) if s3_folder: target_name = '%s/%s' % (s3_folder, os.path.basename(file)) else: target_name = os.path.basename(file) key.key = target_name print('Uploading %s to %s' % (file, target_name)) key.set_contents_from_filename(file) print('Upload %s FINISHED: %s' % (file, dt.now()))
python
def copy_file(aws_access_key_id, aws_secret_access_key, bucket_name, file, s3_folder): """ copies file to bucket s3_folder """ # Connect to the bucket bucket = s3_bucket(aws_access_key_id, aws_secret_access_key, bucket_name) key = boto.s3.key.Key(bucket) if s3_folder: target_name = '%s/%s' % (s3_folder, os.path.basename(file)) else: target_name = os.path.basename(file) key.key = target_name print('Uploading %s to %s' % (file, target_name)) key.set_contents_from_filename(file) print('Upload %s FINISHED: %s' % (file, dt.now()))
[ "def", "copy_file", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "bucket_name", ",", "file", ",", "s3_folder", ")", ":", "# Connect to the bucket", "bucket", "=", "s3_bucket", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "bucket_name", ")", "key", "=", "boto", ".", "s3", ".", "key", ".", "Key", "(", "bucket", ")", "if", "s3_folder", ":", "target_name", "=", "'%s/%s'", "%", "(", "s3_folder", ",", "os", ".", "path", ".", "basename", "(", "file", ")", ")", "else", ":", "target_name", "=", "os", ".", "path", ".", "basename", "(", "file", ")", "key", ".", "key", "=", "target_name", "print", "(", "'Uploading %s to %s'", "%", "(", "file", ",", "target_name", ")", ")", "key", ".", "set_contents_from_filename", "(", "file", ")", "print", "(", "'Upload %s FINISHED: %s'", "%", "(", "file", ",", "dt", ".", "now", "(", ")", ")", ")" ]
copies file to bucket s3_folder
[ "copies", "file", "to", "bucket", "s3_folder" ]
8a0fb3e51a7b873eb4287d4954548a0dbab0e734
https://github.com/fogcitymarathoner/s3_mysql_backup/blob/8a0fb3e51a7b873eb4287d4954548a0dbab0e734/s3_mysql_backup/copy_file.py#L8-L26
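The key-naming rule in `copy_file`, factored into a pure function that runs without AWS credentials:

import os

def target_name(path, s3_folder=None):
    # "<s3_folder>/<basename>" when a folder is given, else the basename.
    if s3_folder:
        return '%s/%s' % (s3_folder, os.path.basename(path))
    return os.path.basename(path)

print(target_name('/var/backups/db.sql.gz', 'nightly'))  # nightly/db.sql.gz
print(target_name('/var/backups/db.sql.gz'))             # db.sql.gz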
243,737
sliem/barrett
example/plot.py
main
def main(): """ We here demostrate the basic functionality of barrett. We use a global scan of scalar dark matter as an example. The details aren't really important. """ dataset = 'RD' observables = ['log(<\sigma v>)', '\Omega_{\chi}h^2', 'log(\sigma_p^{SI})'] var = ['log(m_{\chi})'] var += ['log(C_1)', 'log(C_2)', 'log(C_3)', 'log(C_4)', 'log(C_5)', 'log(C_6)'] var += observables plot_vs_mass(dataset, observables, 'mass_vs_observables.png') plot_oneD(dataset, var, 'oneD.png') pairplot(dataset, var, 'pairplot.png')
python
def main(): """ We here demostrate the basic functionality of barrett. We use a global scan of scalar dark matter as an example. The details aren't really important. """ dataset = 'RD' observables = ['log(<\sigma v>)', '\Omega_{\chi}h^2', 'log(\sigma_p^{SI})'] var = ['log(m_{\chi})'] var += ['log(C_1)', 'log(C_2)', 'log(C_3)', 'log(C_4)', 'log(C_5)', 'log(C_6)'] var += observables plot_vs_mass(dataset, observables, 'mass_vs_observables.png') plot_oneD(dataset, var, 'oneD.png') pairplot(dataset, var, 'pairplot.png')
[ "def", "main", "(", ")", ":", "dataset", "=", "'RD'", "observables", "=", "[", "'log(<\\sigma v>)'", ",", "'\\Omega_{\\chi}h^2'", ",", "'log(\\sigma_p^{SI})'", "]", "var", "=", "[", "'log(m_{\\chi})'", "]", "var", "+=", "[", "'log(C_1)'", ",", "'log(C_2)'", ",", "'log(C_3)'", ",", "'log(C_4)'", ",", "'log(C_5)'", ",", "'log(C_6)'", "]", "var", "+=", "observables", "plot_vs_mass", "(", "dataset", ",", "observables", ",", "'mass_vs_observables.png'", ")", "plot_oneD", "(", "dataset", ",", "var", ",", "'oneD.png'", ")", "pairplot", "(", "dataset", ",", "var", ",", "'pairplot.png'", ")" ]
Here we demonstrate the basic functionality of barrett. We use a global scan of scalar dark matter as an example. The details aren't really important.
[ "We", "here", "demostrate", "the", "basic", "functionality", "of", "barrett", ".", "We", "use", "a", "global", "scan", "of", "scalar", "dark", "matter", "as", "an", "example", ".", "The", "details", "aren", "t", "really", "important", "." ]
d48e96591577d1fcecd50c21a9be71573218cde7
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/example/plot.py#L10-L23
243,738
sliem/barrett
example/plot.py
pairplot
def pairplot(dataset, vars, filename, bins=60): """ Plot a matrix of the specified variables with all the 2D pdfs and 1D pdfs. """ n = len(vars) fig, axes = plt.subplots(nrows=n, ncols=n) plt.subplots_adjust(wspace=0.1, hspace=0.1) for i, x in enumerate(vars): for j, y in enumerate(vars): print(((x, y), (i, j))) ax = axes[j,i] if j < i: ax.axis('off') continue elif i == j: P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins) P.plot(ax) ax.set_xlim(limits(x)) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.xaxis.set_ticks_position('bottom') ax.set_yticks([]) else: P = posterior.twoD(dataset+'.h5', x, y, xlimits=limits(x), ylimits=limits(y), xbins=bins, ybins=bins) # apply some gaussian smoothing to make the contours slightly smoother sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0]) P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest') P.plot(ax, levels=np.linspace(0.9, 0.1, 9)) ax.set_xlim(limits(x)) ax.set_ylim(limits(y)) # now we clean up labels, ticks and such leftmostcol = i == 0 bottomrow = j == n-1 ax.set_xlabel(labels(x) if bottomrow else '') ax.set_ylabel(labels(y) if leftmostcol else '') if not leftmostcol: ax.set_yticklabels([]) if not bottomrow: ax.set_xticklabels([]) fig.set_size_inches(n*4,n*4) fig.savefig(filename, dpi=200, bbox_inches='tight') plt.close(fig)
python
def pairplot(dataset, vars, filename, bins=60): """ Plot a matrix of the specified variables with all the 2D pdfs and 1D pdfs. """ n = len(vars) fig, axes = plt.subplots(nrows=n, ncols=n) plt.subplots_adjust(wspace=0.1, hspace=0.1) for i, x in enumerate(vars): for j, y in enumerate(vars): print(((x, y), (i, j))) ax = axes[j,i] if j < i: ax.axis('off') continue elif i == j: P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins) P.plot(ax) ax.set_xlim(limits(x)) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.xaxis.set_ticks_position('bottom') ax.set_yticks([]) else: P = posterior.twoD(dataset+'.h5', x, y, xlimits=limits(x), ylimits=limits(y), xbins=bins, ybins=bins) # apply some gaussian smoothing to make the contours slightly smoother sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0]) P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest') P.plot(ax, levels=np.linspace(0.9, 0.1, 9)) ax.set_xlim(limits(x)) ax.set_ylim(limits(y)) # now we clean up labels, ticks and such leftmostcol = i == 0 bottomrow = j == n-1 ax.set_xlabel(labels(x) if bottomrow else '') ax.set_ylabel(labels(y) if leftmostcol else '') if not leftmostcol: ax.set_yticklabels([]) if not bottomrow: ax.set_xticklabels([]) fig.set_size_inches(n*4,n*4) fig.savefig(filename, dpi=200, bbox_inches='tight') plt.close(fig)
[ "def", "pairplot", "(", "dataset", ",", "vars", ",", "filename", ",", "bins", "=", "60", ")", ":", "n", "=", "len", "(", "vars", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "=", "n", ",", "ncols", "=", "n", ")", "plt", ".", "subplots_adjust", "(", "wspace", "=", "0.1", ",", "hspace", "=", "0.1", ")", "for", "i", ",", "x", "in", "enumerate", "(", "vars", ")", ":", "for", "j", ",", "y", "in", "enumerate", "(", "vars", ")", ":", "print", "(", "(", "(", "x", ",", "y", ")", ",", "(", "i", ",", "j", ")", ")", ")", "ax", "=", "axes", "[", "j", ",", "i", "]", "if", "j", "<", "i", ":", "ax", ".", "axis", "(", "'off'", ")", "continue", "elif", "i", "==", "j", ":", "P", "=", "posterior", ".", "oneD", "(", "dataset", "+", "'.h5'", ",", "x", ",", "limits", "=", "limits", "(", "x", ")", ",", "bins", "=", "bins", ")", "P", ".", "plot", "(", "ax", ")", "ax", ".", "set_xlim", "(", "limits", "(", "x", ")", ")", "ax", ".", "spines", "[", "'right'", "]", ".", "set_visible", "(", "False", ")", "ax", ".", "spines", "[", "'top'", "]", ".", "set_visible", "(", "False", ")", "ax", ".", "xaxis", ".", "set_ticks_position", "(", "'bottom'", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "else", ":", "P", "=", "posterior", ".", "twoD", "(", "dataset", "+", "'.h5'", ",", "x", ",", "y", ",", "xlimits", "=", "limits", "(", "x", ")", ",", "ylimits", "=", "limits", "(", "y", ")", ",", "xbins", "=", "bins", ",", "ybins", "=", "bins", ")", "# apply some gaussian smoothing to make the contours slightly smoother", "sigmas", "=", "(", "np", ".", "diff", "(", "P", ".", "ycenters", ")", "[", "0", "]", ",", "np", ".", "diff", "(", "P", ".", "xcenters", ")", "[", "0", "]", ")", "P", ".", "pdf", "=", "gaussian_filter", "(", "P", ".", "pdf", ",", "sigmas", ",", "mode", "=", "'nearest'", ")", "P", ".", "plot", "(", "ax", ",", "levels", "=", "np", ".", "linspace", "(", "0.9", ",", "0.1", ",", "9", ")", ")", "ax", ".", "set_xlim", "(", "limits", "(", "x", ")", ")", "ax", ".", "set_ylim", "(", "limits", "(", "y", ")", ")", "# now we clean up labels, ticks and such", "leftmostcol", "=", "i", "==", "0", "bottomrow", "=", "j", "==", "n", "-", "1", "ax", ".", "set_xlabel", "(", "labels", "(", "x", ")", "if", "bottomrow", "else", "''", ")", "ax", ".", "set_ylabel", "(", "labels", "(", "y", ")", "if", "leftmostcol", "else", "''", ")", "if", "not", "leftmostcol", ":", "ax", ".", "set_yticklabels", "(", "[", "]", ")", "if", "not", "bottomrow", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "fig", ".", "set_size_inches", "(", "n", "*", "4", ",", "n", "*", "4", ")", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "200", ",", "bbox_inches", "=", "'tight'", ")", "plt", ".", "close", "(", "fig", ")" ]
Plot a matrix of the specified variables with all the 2D pdfs and 1D pdfs.
[ "Plot", "a", "matrix", "of", "the", "specified", "variables", "with", "all", "the", "2D", "pdfs", "and", "1D", "pdfs", "." ]
d48e96591577d1fcecd50c21a9be71573218cde7
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/example/plot.py#L26-L73
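The smoothing step in `pairplot` in isolation: one gaussian sigma per axis, taken from the bin spacing along that axis (a sketch with random data standing in for the binned posterior):

import numpy as np
from scipy.ndimage import gaussian_filter

pdf = np.random.rand(60, 60)        # stand-in for P.pdf
xcenters = np.linspace(0., 1., 60)
ycenters = np.linspace(-2., 2., 60)
sigmas = (np.diff(ycenters)[0], np.diff(xcenters)[0])  # (row, column) spacing
smoothed = gaussian_filter(pdf, sigmas, mode='nearest')
print(smoothed.shape)  # (60, 60)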
243,739
sliem/barrett
example/plot.py
plot_vs_mass
def plot_vs_mass(dataset, vars, filename, bins=60): """ Plot 2D marginalised posteriors of the 'vars' vs the dark matter mass. We plot the one sigma, and two sigma filled contours. More contours can be plotted which produces something more akin to a heatmap. If one require more complicated plotting, it is recommended to write a custom plotting function by extending the default plot() method. """ n = len(vars) fig, axes = plt.subplots(nrows=n, ncols=1, sharex='col', sharey=False) plt.subplots_adjust(wspace=0, hspace=0) m = 'log(m_{\chi})' for i, y in enumerate(vars): ax = axes[i] P = posterior.twoD(dataset+'.h5', m, y, xlimits=limits(m), ylimits=limits(y), xbins=bins, ybins=bins) # apply some gaussian smoothing to make the contours slightly smoother sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0]) P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest') P.plot(ax, levels=np.linspace(0.9, 0.1, 9)) ax.set_xlabel(labels('log(m_{\chi})')) ax.set_ylabel(labels(y)) fig.set_size_inches(4,n*3) fig.savefig(filename, dpi=200, bbox_inches='tight') plt.close(fig)
python
def plot_vs_mass(dataset, vars, filename, bins=60): """ Plot 2D marginalised posteriors of the 'vars' vs the dark matter mass. We plot the one sigma, and two sigma filled contours. More contours can be plotted which produces something more akin to a heatmap. If one require more complicated plotting, it is recommended to write a custom plotting function by extending the default plot() method. """ n = len(vars) fig, axes = plt.subplots(nrows=n, ncols=1, sharex='col', sharey=False) plt.subplots_adjust(wspace=0, hspace=0) m = 'log(m_{\chi})' for i, y in enumerate(vars): ax = axes[i] P = posterior.twoD(dataset+'.h5', m, y, xlimits=limits(m), ylimits=limits(y), xbins=bins, ybins=bins) # apply some gaussian smoothing to make the contours slightly smoother sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0]) P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest') P.plot(ax, levels=np.linspace(0.9, 0.1, 9)) ax.set_xlabel(labels('log(m_{\chi})')) ax.set_ylabel(labels(y)) fig.set_size_inches(4,n*3) fig.savefig(filename, dpi=200, bbox_inches='tight') plt.close(fig)
[ "def", "plot_vs_mass", "(", "dataset", ",", "vars", ",", "filename", ",", "bins", "=", "60", ")", ":", "n", "=", "len", "(", "vars", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "=", "n", ",", "ncols", "=", "1", ",", "sharex", "=", "'col'", ",", "sharey", "=", "False", ")", "plt", ".", "subplots_adjust", "(", "wspace", "=", "0", ",", "hspace", "=", "0", ")", "m", "=", "'log(m_{\\chi})'", "for", "i", ",", "y", "in", "enumerate", "(", "vars", ")", ":", "ax", "=", "axes", "[", "i", "]", "P", "=", "posterior", ".", "twoD", "(", "dataset", "+", "'.h5'", ",", "m", ",", "y", ",", "xlimits", "=", "limits", "(", "m", ")", ",", "ylimits", "=", "limits", "(", "y", ")", ",", "xbins", "=", "bins", ",", "ybins", "=", "bins", ")", "# apply some gaussian smoothing to make the contours slightly smoother", "sigmas", "=", "(", "np", ".", "diff", "(", "P", ".", "ycenters", ")", "[", "0", "]", ",", "np", ".", "diff", "(", "P", ".", "xcenters", ")", "[", "0", "]", ")", "P", ".", "pdf", "=", "gaussian_filter", "(", "P", ".", "pdf", ",", "sigmas", ",", "mode", "=", "'nearest'", ")", "P", ".", "plot", "(", "ax", ",", "levels", "=", "np", ".", "linspace", "(", "0.9", ",", "0.1", ",", "9", ")", ")", "ax", ".", "set_xlabel", "(", "labels", "(", "'log(m_{\\chi})'", ")", ")", "ax", ".", "set_ylabel", "(", "labels", "(", "y", ")", ")", "fig", ".", "set_size_inches", "(", "4", ",", "n", "*", "3", ")", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "200", ",", "bbox_inches", "=", "'tight'", ")", "plt", ".", "close", "(", "fig", ")" ]
Plot 2D marginalised posteriors of the 'vars' vs the dark matter mass. We plot the one sigma and two sigma filled contours. More contours can be plotted, which produces something more akin to a heatmap. If one requires more complicated plotting, it is recommended to write a custom plotting function by extending the default plot() method.
[ "Plot", "2D", "marginalised", "posteriors", "of", "the", "vars", "vs", "the", "dark", "matter", "mass", ".", "We", "plot", "the", "one", "sigma", "and", "two", "sigma", "filled", "contours", ".", "More", "contours", "can", "be", "plotted", "which", "produces", "something", "more", "akin", "to", "a", "heatmap", "." ]
d48e96591577d1fcecd50c21a9be71573218cde7
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/example/plot.py#L76-L110
243,740
sliem/barrett
example/plot.py
plot_oneD
def plot_oneD(dataset, vars, filename, bins=60): """ Plot 1D marginalised posteriors for the 'vars' of interest.""" n = len(vars) fig, axes = plt.subplots(nrows=n, ncols=1, sharex=False, sharey=False) for i, x in enumerate(vars): ax = axes[i] P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins) P.plot(ax) ax.set_xlabel(labels(x)) ax.set_yticklabels([]) fig.set_size_inches(4, 4*n) fig.savefig(filename, dpi=200, bbox_inches='tight') plt.close(fig)
python
def plot_oneD(dataset, vars, filename, bins=60): """ Plot 1D marginalised posteriors for the 'vars' of interest.""" n = len(vars) fig, axes = plt.subplots(nrows=n, ncols=1, sharex=False, sharey=False) for i, x in enumerate(vars): ax = axes[i] P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins) P.plot(ax) ax.set_xlabel(labels(x)) ax.set_yticklabels([]) fig.set_size_inches(4, 4*n) fig.savefig(filename, dpi=200, bbox_inches='tight') plt.close(fig)
[ "def", "plot_oneD", "(", "dataset", ",", "vars", ",", "filename", ",", "bins", "=", "60", ")", ":", "n", "=", "len", "(", "vars", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "=", "n", ",", "ncols", "=", "1", ",", "sharex", "=", "False", ",", "sharey", "=", "False", ")", "for", "i", ",", "x", "in", "enumerate", "(", "vars", ")", ":", "ax", "=", "axes", "[", "i", "]", "P", "=", "posterior", ".", "oneD", "(", "dataset", "+", "'.h5'", ",", "x", ",", "limits", "=", "limits", "(", "x", ")", ",", "bins", "=", "bins", ")", "P", ".", "plot", "(", "ax", ")", "ax", ".", "set_xlabel", "(", "labels", "(", "x", ")", ")", "ax", ".", "set_yticklabels", "(", "[", "]", ")", "fig", ".", "set_size_inches", "(", "4", ",", "4", "*", "n", ")", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "200", ",", "bbox_inches", "=", "'tight'", ")", "plt", ".", "close", "(", "fig", ")" ]
Plot 1D marginalised posteriors for the 'vars' of interest.
[ "Plot", "1D", "marginalised", "posteriors", "for", "the", "vars", "of", "interest", "." ]
d48e96591577d1fcecd50c21a9be71573218cde7
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/example/plot.py#L113-L131
243,741
Vesuvium/frustum
frustum/Frustum.py
Frustum.start_logger
def start_logger(self): """ Enables the root logger and configures extra loggers. """ level = self.real_level(self.level) logging.basicConfig(level=level) self.set_logger(self.name, self.level) config.dictConfig(self.config) self.logger = logging.getLogger(self.name)
python
def start_logger(self): """ Enables the root logger and configures extra loggers. """ level = self.real_level(self.level) logging.basicConfig(level=level) self.set_logger(self.name, self.level) config.dictConfig(self.config) self.logger = logging.getLogger(self.name)
[ "def", "start_logger", "(", "self", ")", ":", "level", "=", "self", ".", "real_level", "(", "self", ".", "level", ")", "logging", ".", "basicConfig", "(", "level", "=", "level", ")", "self", ".", "set_logger", "(", "self", ".", "name", ",", "self", ".", "level", ")", "config", ".", "dictConfig", "(", "self", ".", "config", ")", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "self", ".", "name", ")" ]
Enables the root logger and configures extra loggers.
[ "Enables", "the", "root", "logger", "and", "configures", "extra", "loggers", "." ]
3ee354df5ce12c31bc2d18febea0cca912a7c4e3
https://github.com/Vesuvium/frustum/blob/3ee354df5ce12c31bc2d18febea0cca912a7c4e3/frustum/Frustum.py#L22-L30
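A self-contained version of the pattern in `start_logger`: enable the root logger with basicConfig, then apply a dictConfig mapping with per-logger levels. The 'version' key is required by dictConfig; Frustum's self.config presumably carries it, which is an assumption here:

import logging
from logging import config

cfg = {
    'version': 1,
    'disable_existing_loggers': False,
    'loggers': {'myapp': {'level': 'DEBUG'}},
}
logging.basicConfig(level=logging.INFO)
config.dictConfig(cfg)
logging.getLogger('myapp').debug('emitted: myapp sits at DEBUG')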
243,742
Vesuvium/frustum
frustum/Frustum.py
Frustum.set_logger
def set_logger(self, logger_name, level, handler=None): """ Sets the level of a logger """ if 'loggers' not in self.config: self.config['loggers'] = {} real_level = self.real_level(level) self.config['loggers'][logger_name] = {'level': real_level} if handler: self.config['loggers'][logger_name]['handlers'] = [handler]
python
def set_logger(self, logger_name, level, handler=None): """ Sets the level of a logger """ if 'loggers' not in self.config: self.config['loggers'] = {} real_level = self.real_level(level) self.config['loggers'][logger_name] = {'level': real_level} if handler: self.config['loggers'][logger_name]['handlers'] = [handler]
[ "def", "set_logger", "(", "self", ",", "logger_name", ",", "level", ",", "handler", "=", "None", ")", ":", "if", "'loggers'", "not", "in", "self", ".", "config", ":", "self", ".", "config", "[", "'loggers'", "]", "=", "{", "}", "real_level", "=", "self", ".", "real_level", "(", "level", ")", "self", ".", "config", "[", "'loggers'", "]", "[", "logger_name", "]", "=", "{", "'level'", ":", "real_level", "}", "if", "handler", ":", "self", ".", "config", "[", "'loggers'", "]", "[", "logger_name", "]", "[", "'handlers'", "]", "=", "[", "handler", "]" ]
Sets the level of a logger
[ "Sets", "the", "level", "of", "a", "logger" ]
3ee354df5ce12c31bc2d18febea0cca912a7c4e3
https://github.com/Vesuvium/frustum/blob/3ee354df5ce12c31bc2d18febea0cca912a7c4e3/frustum/Frustum.py#L37-L46
243,743
Vesuvium/frustum
frustum/Frustum.py
Frustum.register_event
def register_event(self, event_name, event_level, message): """ Registers an event so that it can be logged later. """ self.events[event_name] = (event_level, message)
python
def register_event(self, event_name, event_level, message): """ Registers an event so that it can be logged later. """ self.events[event_name] = (event_level, message)
[ "def", "register_event", "(", "self", ",", "event_name", ",", "event_level", ",", "message", ")", ":", "self", ".", "events", "[", "event_name", "]", "=", "(", "event_level", ",", "message", ")" ]
Registers an event so that it can be logged later.
[ "Registers", "an", "event", "so", "that", "it", "can", "be", "logged", "later", "." ]
3ee354df5ce12c31bc2d18febea0cca912a7c4e3
https://github.com/Vesuvium/frustum/blob/3ee354df5ce12c31bc2d18febea0cca912a7c4e3/frustum/Frustum.py#L55-L59
243,744
ikalnytskyi/dooku
dooku/datetime.py
to_iso8601
def to_iso8601(dt, tz=None): """ Returns an ISO-8601 representation of a given datetime instance. >>> to_iso8601(datetime.datetime.now()) '2014-10-01T23:21:33.718508Z' :param dt: a :class:`~datetime.datetime` instance :param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one """ if tz is not None: dt = dt.replace(tzinfo=tz) iso8601 = dt.isoformat() # Naive datetime objects usually don't have info about timezone. # Let's assume it's UTC and add Z to the end. if re.match(r'.*(Z|[+-]\d{2}:\d{2})$', iso8601) is None: iso8601 += 'Z' return iso8601
python
def to_iso8601(dt, tz=None): """ Returns an ISO-8601 representation of a given datetime instance. >>> to_iso8601(datetime.datetime.now()) '2014-10-01T23:21:33.718508Z' :param dt: a :class:`~datetime.datetime` instance :param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one """ if tz is not None: dt = dt.replace(tzinfo=tz) iso8601 = dt.isoformat() # Naive datetime objects usually don't have info about timezone. # Let's assume it's UTC and add Z to the end. if re.match(r'.*(Z|[+-]\d{2}:\d{2})$', iso8601) is None: iso8601 += 'Z' return iso8601
[ "def", "to_iso8601", "(", "dt", ",", "tz", "=", "None", ")", ":", "if", "tz", "is", "not", "None", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "tz", ")", "iso8601", "=", "dt", ".", "isoformat", "(", ")", "# Naive datetime objects usually don't have info about timezone.", "# Let's assume it's UTC and add Z to the end.", "if", "re", ".", "match", "(", "r'.*(Z|[+-]\\d{2}:\\d{2})$'", ",", "iso8601", ")", "is", "None", ":", "iso8601", "+=", "'Z'", "return", "iso8601" ]
Returns an ISO-8601 representation of a given datetime instance. >>> to_iso8601(datetime.datetime.now()) '2014-10-01T23:21:33.718508Z' :param dt: a :class:`~datetime.datetime` instance :param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one
[ "Returns", "an", "ISO", "-", "8601", "representation", "of", "a", "given", "datetime", "instance", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/datetime.py#L20-L39
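A quick check of the suffix rule used above: the regex treats a trailing 'Z' or numeric offset as timezone information, and anything else as naive:

import re

def needs_z(iso8601):
    # True when to_iso8601 would append a 'Z'.
    return re.match(r'.*(Z|[+-]\d{2}:\d{2})$', iso8601) is None

print(needs_z('2014-10-01T23:21:33.718508'))  # True: append 'Z'
print(needs_z('2014-10-01T23:21:33+02:00'))   # False
print(needs_z('2014-10-01T23:21:33Z'))        # False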
243,745
ikalnytskyi/dooku
dooku/datetime.py
Local._is_dst
def _is_dst(dt): """ Returns True if a given datetime object represents a time with DST shift. """ # we can't use `dt.timestamp()` here since it requires a `utcoffset` # and we don't want to get into a recursive loop localtime = time.localtime(time.mktime(( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, # day of the year -1 # dst ))) return localtime.tm_isdst > 0
python
def _is_dst(dt): """ Returns True if a given datetime object represents a time with DST shift. """ # we can't use `dt.timestamp()` here since it requires a `utcoffset` # and we don't want to get into a recursive loop localtime = time.localtime(time.mktime(( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, # day of the year -1 # dst ))) return localtime.tm_isdst > 0
[ "def", "_is_dst", "(", "dt", ")", ":", "# we can't use `dt.timestamp()` here since it requires a `utcoffset`", "# and we don't want to get into a recursive loop", "localtime", "=", "time", ".", "localtime", "(", "time", ".", "mktime", "(", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ",", "dt", ".", "hour", ",", "dt", ".", "minute", ",", "dt", ".", "second", ",", "dt", ".", "weekday", "(", ")", ",", "0", ",", "# day of the year", "-", "1", "# dst", ")", ")", ")", "return", "localtime", ".", "tm_isdst", ">", "0" ]
Returns True if a given datetime object represents a time with DST shift.
[ "Returns", "True", "if", "a", "given", "datetime", "object", "represents", "a", "time", "with", "DST", "shift", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/datetime.py#L93-L111
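The heart of `_is_dst` is the struct_time round trip: slot 8 set to -1 asks the platform to decide whether DST applies, and tm_isdst on the result reveals the answer. A tiny demonstration whose output depends on the local timezone:

import time

summer = time.localtime(time.mktime((2014, 7, 1, 12, 0, 0, 1, 0, -1)))
winter = time.localtime(time.mktime((2014, 1, 1, 12, 0, 0, 2, 0, -1)))
print(summer.tm_isdst, winter.tm_isdst)  # e.g. "1 0" in a DST-observing zone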
243,746
ikalnytskyi/dooku
dooku/datetime.py
Local.dst
def dst(self, dt): """ Returns a difference in seconds between standard offset and dst offset. """ if not self._is_dst(dt): return datetime.timedelta(0) offset = time.timezone - time.altzone return datetime.timedelta(seconds=-offset)
python
def dst(self, dt): """ Returns a difference in seconds between standard offset and dst offset. """ if not self._is_dst(dt): return datetime.timedelta(0) offset = time.timezone - time.altzone return datetime.timedelta(seconds=-offset)
[ "def", "dst", "(", "self", ",", "dt", ")", ":", "if", "not", "self", ".", "_is_dst", "(", "dt", ")", ":", "return", "datetime", ".", "timedelta", "(", "0", ")", "offset", "=", "time", ".", "timezone", "-", "time", ".", "altzone", "return", "datetime", ".", "timedelta", "(", "seconds", "=", "-", "offset", ")" ]
Returns a difference in seconds between standard offset and dst offset.
[ "Returns", "a", "difference", "in", "seconds", "between", "standard", "offset", "and", "dst", "offset", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/datetime.py#L118-L127
243,747
collectiveacuity/labPack
labpack/parsing/conversion.py
_to_camelcase
def _to_camelcase(input_string): ''' a helper method to convert python to camelcase''' camel_string = '' for i in range(len(input_string)): if input_string[i] == '_': pass elif not camel_string: camel_string += input_string[i].upper() elif input_string[i-1] == '_': camel_string += input_string[i].upper() else: camel_string += input_string[i] return camel_string
python
def _to_camelcase(input_string): ''' a helper method to convert python to camelcase''' camel_string = '' for i in range(len(input_string)): if input_string[i] == '_': pass elif not camel_string: camel_string += input_string[i].upper() elif input_string[i-1] == '_': camel_string += input_string[i].upper() else: camel_string += input_string[i] return camel_string
[ "def", "_to_camelcase", "(", "input_string", ")", ":", "camel_string", "=", "''", "for", "i", "in", "range", "(", "len", "(", "input_string", ")", ")", ":", "if", "input_string", "[", "i", "]", "==", "'_'", ":", "pass", "elif", "not", "camel_string", ":", "camel_string", "+=", "input_string", "[", "i", "]", ".", "upper", "(", ")", "elif", "input_string", "[", "i", "-", "1", "]", "==", "'_'", ":", "camel_string", "+=", "input_string", "[", "i", "]", ".", "upper", "(", ")", "else", ":", "camel_string", "+=", "input_string", "[", "i", "]", "return", "camel_string" ]
a helper method to convert python to camelcase
[ "a", "helper", "method", "to", "convert", "python", "to", "camelcase" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/conversion.py#L6-L18
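Usage sketch for the helper above; importing an underscore-prefixed function directly is an assumption about how the module is meant to be consumed:

from labpack.parsing.conversion import _to_camelcase

print(_to_camelcase('access_token_expires'))  # AccessTokenExpires
print(_to_camelcase('id'))                    # Id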
243,748
collectiveacuity/labPack
labpack/parsing/conversion.py
_to_python
def _to_python(input_string): ''' a helper method to convert camelcase to python''' python_string = '' for i in range(len(input_string)): if not python_string: python_string += input_string[i].lower() elif input_string[i].isupper(): python_string += '_%s' % input_string[i].lower() else: python_string += input_string[i] return python_string
python
def _to_python(input_string): ''' a helper method to convert camelcase to python''' python_string = '' for i in range(len(input_string)): if not python_string: python_string += input_string[i].lower() elif input_string[i].isupper(): python_string += '_%s' % input_string[i].lower() else: python_string += input_string[i] return python_string
[ "def", "_to_python", "(", "input_string", ")", ":", "python_string", "=", "''", "for", "i", "in", "range", "(", "len", "(", "input_string", ")", ")", ":", "if", "not", "python_string", ":", "python_string", "+=", "input_string", "[", "i", "]", ".", "lower", "(", ")", "elif", "input_string", "[", "i", "]", ".", "isupper", "(", ")", ":", "python_string", "+=", "'_%s'", "%", "input_string", "[", "i", "]", ".", "lower", "(", ")", "else", ":", "python_string", "+=", "input_string", "[", "i", "]", "return", "python_string" ]
a helper method to convert camelcase to python
[ "a", "helper", "method", "to", "convert", "camelcase", "to", "python" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/conversion.py#L20-L30
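The two private helpers round-trip for simple snake_case names (same import caveat as above):

from labpack.parsing.conversion import _to_camelcase, _to_python

name = 'access_token_expires'
print(_to_python(_to_camelcase(name)) == name)  # True
print(_to_python('AccessTokenExpires'))         # access_token_expires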
243,749
GemHQ/round-py
round/subscriptions.py
Subscriptions.create
def create(self, callback_url): """Register a new Subscription on this collection's parent object. Args: callback_url (str): URI of an active endpoint which can receive notifications. Returns: A round.Subscription object if successful. """ resource = self.resource.create({'subscribed_to': 'address', 'callback_url': callback_url}) subscription = self.wrap(resource) self.add(subscription) return subscription
python
def create(self, callback_url): """Register a new Subscription on this collection's parent object. Args: callback_url (str): URI of an active endpoint which can receive notifications. Returns: A round.Subscription object if successful. """ resource = self.resource.create({'subscribed_to': 'address', 'callback_url': callback_url}) subscription = self.wrap(resource) self.add(subscription) return subscription
[ "def", "create", "(", "self", ",", "callback_url", ")", ":", "resource", "=", "self", ".", "resource", ".", "create", "(", "{", "'subscribed_to'", ":", "'address'", ",", "'callback_url'", ":", "callback_url", "}", ")", "subscription", "=", "self", ".", "wrap", "(", "resource", ")", "self", ".", "add", "(", "subscription", ")", "return", "subscription" ]
Register a new Subscription on this collection's parent object. Args: callback_url (str): URI of an active endpoint which can receive notifications. Returns: A round.Subscription object if successful.
[ "Register", "a", "new", "Subscription", "on", "this", "collection", "s", "parent", "object", "." ]
d0838f849cd260b1eb5df67ed3c6f2fe56c91c21
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/subscriptions.py#L15-L29
243,750
mbodenhamer/syn
syn/schema/b/sequence.py
Sequence.match
def match(self, seq, **kwargs): '''If the schema matches seq, returns a list of the matched objects. Otherwise, returns MatchFailure instance. ''' strict = kwargs.get('strict', False) top_level = kwargs.get('top_level', True) match = kwargs.get('match', list()) if top_level: kwargs['top_level'] = False kwargs['match'] = match try: seq = IterableList(seq) self.match(seq, **kwargs) if strict: if not seq.empty(): raise MatchFailed('Sequence is too long', seq) except MatchFailed as e: return e.failure() return Match(*match) for elem in self.elems: elem.match(seq, **kwargs)
python
def match(self, seq, **kwargs): '''If the schema matches seq, returns a list of the matched objects. Otherwise, returns MatchFailure instance. ''' strict = kwargs.get('strict', False) top_level = kwargs.get('top_level', True) match = kwargs.get('match', list()) if top_level: kwargs['top_level'] = False kwargs['match'] = match try: seq = IterableList(seq) self.match(seq, **kwargs) if strict: if not seq.empty(): raise MatchFailed('Sequence is too long', seq) except MatchFailed as e: return e.failure() return Match(*match) for elem in self.elems: elem.match(seq, **kwargs)
[ "def", "match", "(", "self", ",", "seq", ",", "*", "*", "kwargs", ")", ":", "strict", "=", "kwargs", ".", "get", "(", "'strict'", ",", "False", ")", "top_level", "=", "kwargs", ".", "get", "(", "'top_level'", ",", "True", ")", "match", "=", "kwargs", ".", "get", "(", "'match'", ",", "list", "(", ")", ")", "if", "top_level", ":", "kwargs", "[", "'top_level'", "]", "=", "False", "kwargs", "[", "'match'", "]", "=", "match", "try", ":", "seq", "=", "IterableList", "(", "seq", ")", "self", ".", "match", "(", "seq", ",", "*", "*", "kwargs", ")", "if", "strict", ":", "if", "not", "seq", ".", "empty", "(", ")", ":", "raise", "MatchFailed", "(", "'Sequence is too long'", ",", "seq", ")", "except", "MatchFailed", "as", "e", ":", "return", "e", ".", "failure", "(", ")", "return", "Match", "(", "*", "match", ")", "for", "elem", "in", "self", ".", "elems", ":", "elem", ".", "match", "(", "seq", ",", "*", "*", "kwargs", ")" ]
If the schema matches seq, returns a list of the matched objects. Otherwise, returns a MatchFailure instance.
[ "If", "the", "schema", "matches", "seq", "returns", "a", "list", "of", "the", "matched", "objects", ".", "Otherwise", "returns", "MatchFailure", "instance", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/schema/b/sequence.py#L259-L285
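A stripped-down model of the control flow in `match` above: the top-level call re-enters itself with top_level disabled and a shared match list, converting a raised MatchFailed into a failure value. Names and return shapes here are simplified stand-ins for syn's Match/MatchFailure classes:

class MatchFailed(Exception):
    pass

def match(schema, seq, top_level=True, out=None):
    if top_level:
        out = []
        try:
            # Re-enter with an iterator so position is shared across elements.
            match(schema, iter(seq), top_level=False, out=out)
        except MatchFailed as e:
            return ('failure', str(e))
        return ('match', out)
    for expected in schema:
        got = next(seq, None)
        if got != expected:
            raise MatchFailed('expected %r, got %r' % (expected, got))
        out.append(got)

print(match([1, 2], [1, 2]))  # ('match', [1, 2])
print(match([1, 2], [1, 3]))  # ('failure', 'expected 2, got 3')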
243,751
scieloorg/accessstatsapi
accessstats/queries.py
downloads_per_year
def downloads_per_year(collection, code, raw=False): """ This method retrieve the total of downloads per year. arguments collection: SciELO 3 letters Acronym code: (Journal ISSN, Issue PID, Article PID) return [ ("2017", "20101"), ("2016", "11201"), ("2015", "12311"), ... ] """ tc = ThriftClient() body = {"query": {"filtered": {}}} fltr = {} query = { "query": { "bool": { "must": [ { "match": { "collection": collection } } ] } } } aggs = { "aggs": { "access_year": { "terms": { "field": "access_year", "size": 0, "order": { "_term": "asc" } }, "aggs": { "access_total": { "sum": { "field": "access_total" } } } } } } body['query']['filtered'].update(fltr) body['query']['filtered'].update(query) body.update(aggs) code_type = _code_type(code) if code_type: query["query"]["bool"]["must"].append({ "match": { code_type: code } }) query_parameters = [ ('size', '0') ] query_result = tc.search(json.dumps(body), query_parameters) return query_result if raw is True else _compute_downloads_per_year(query_result)
python
def downloads_per_year(collection, code, raw=False): """ This method retrieve the total of downloads per year. arguments collection: SciELO 3 letters Acronym code: (Journal ISSN, Issue PID, Article PID) return [ ("2017", "20101"), ("2016", "11201"), ("2015", "12311"), ... ] """ tc = ThriftClient() body = {"query": {"filtered": {}}} fltr = {} query = { "query": { "bool": { "must": [ { "match": { "collection": collection } } ] } } } aggs = { "aggs": { "access_year": { "terms": { "field": "access_year", "size": 0, "order": { "_term": "asc" } }, "aggs": { "access_total": { "sum": { "field": "access_total" } } } } } } body['query']['filtered'].update(fltr) body['query']['filtered'].update(query) body.update(aggs) code_type = _code_type(code) if code_type: query["query"]["bool"]["must"].append({ "match": { code_type: code } }) query_parameters = [ ('size', '0') ] query_result = tc.search(json.dumps(body), query_parameters) return query_result if raw is True else _compute_downloads_per_year(query_result)
[ "def", "downloads_per_year", "(", "collection", ",", "code", ",", "raw", "=", "False", ")", ":", "tc", "=", "ThriftClient", "(", ")", "body", "=", "{", "\"query\"", ":", "{", "\"filtered\"", ":", "{", "}", "}", "}", "fltr", "=", "{", "}", "query", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "[", "{", "\"match\"", ":", "{", "\"collection\"", ":", "collection", "}", "}", "]", "}", "}", "}", "aggs", "=", "{", "\"aggs\"", ":", "{", "\"access_year\"", ":", "{", "\"terms\"", ":", "{", "\"field\"", ":", "\"access_year\"", ",", "\"size\"", ":", "0", ",", "\"order\"", ":", "{", "\"_term\"", ":", "\"asc\"", "}", "}", ",", "\"aggs\"", ":", "{", "\"access_total\"", ":", "{", "\"sum\"", ":", "{", "\"field\"", ":", "\"access_total\"", "}", "}", "}", "}", "}", "}", "body", "[", "'query'", "]", "[", "'filtered'", "]", ".", "update", "(", "fltr", ")", "body", "[", "'query'", "]", "[", "'filtered'", "]", ".", "update", "(", "query", ")", "body", ".", "update", "(", "aggs", ")", "code_type", "=", "_code_type", "(", "code", ")", "if", "code_type", ":", "query", "[", "\"query\"", "]", "[", "\"bool\"", "]", "[", "\"must\"", "]", ".", "append", "(", "{", "\"match\"", ":", "{", "code_type", ":", "code", "}", "}", ")", "query_parameters", "=", "[", "(", "'size'", ",", "'0'", ")", "]", "query_result", "=", "tc", ".", "search", "(", "json", ".", "dumps", "(", "body", ")", ",", "query_parameters", ")", "return", "query_result", "if", "raw", "is", "True", "else", "_compute_downloads_per_year", "(", "query_result", ")" ]
This method retrieves the total number of downloads per year. arguments collection: SciELO 3 letters Acronym code: (Journal ISSN, Issue PID, Article PID) return [ ("2017", "20101"), ("2016", "11201"), ("2015", "12311"), ... ]
[ "This", "method", "retrieve", "the", "total", "of", "downloads", "per", "year", "." ]
8092d76bedab9e82efce4005f9bcd21fb94e8e98
https://github.com/scieloorg/accessstatsapi/blob/8092d76bedab9e82efce4005f9bcd21fb94e8e98/accessstats/queries.py#L38-L115
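One subtlety in the body assembly above: `query` is merged into `body` by reference, so appending the code-type match to `query` after the update() calls still lands inside `body`. A runnable demonstration, using 'issn' as a hypothetical value of `_code_type` (its mapping is not shown in this record):

import json

body = {"query": {"filtered": {}}}
query = {"query": {"bool": {"must": [{"match": {"collection": "scl"}}]}}}
body['query']['filtered'].update(query)

# Mutating `query` after the update still changes `body`: the nested
# dicts are shared, not copied.
query["query"]["bool"]["must"].append({"match": {"issn": "0034-8910"}})
print(json.dumps(body, sort_keys=True))
# Both match clauses now appear under body['query']['filtered'].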
243,752
zorg/zorg-emic
zorg_emic/emic2.py
Emic2.word_wrap
def word_wrap(self, text, width=1023): """ A simple word wrapping greedy algorithm that puts as many words into a single string as possible. """ substrings = [] string = text while len(string) > width: index = width - 1 while not string[index].isspace(): index = index - 1 line = string[0:index] substrings.append(line) string = string[index + 1:] substrings.append(string) return substrings
python
def word_wrap(self, text, width=1023): """ A simple word wrapping greedy algorithm that puts as many words into a single string as possible. """ substrings = [] string = text while len(string) > width: index = width - 1 while not string[index].isspace(): index = index - 1 line = string[0:index] substrings.append(line) string = string[index + 1:] substrings.append(string) return substrings
[ "def", "word_wrap", "(", "self", ",", "text", ",", "width", "=", "1023", ")", ":", "substrings", "=", "[", "]", "string", "=", "text", "while", "len", "(", "string", ")", ">", "width", ":", "index", "=", "width", "-", "1", "while", "not", "string", "[", "index", "]", ".", "isspace", "(", ")", ":", "index", "=", "index", "-", "1", "line", "=", "string", "[", "0", ":", "index", "]", "substrings", ".", "append", "(", "line", ")", "string", "=", "string", "[", "index", "+", "1", ":", "]", "substrings", ".", "append", "(", "string", ")", "return", "substrings" ]
A simple word wrapping greedy algorithm that puts as many words into a single string as possible.
[ "A", "simple", "word", "wrapping", "greedy", "algorithm", "that", "puts", "as", "many", "words", "into", "a", "single", "string", "as", "possible", "." ]
34d49897131cf7773b2b0f46e1e0a796911144e3
https://github.com/zorg/zorg-emic/blob/34d49897131cf7773b2b0f46e1e0a796911144e3/zorg_emic/emic2.py#L66-L86
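The greedy wrapper above is self-contained enough to exercise directly; a standalone sketch with `self` dropped and nothing else changed:

def word_wrap(text, width=1023):
    # Cut at the last space before `width`, then repeat on the remainder.
    substrings = []
    string = text
    while len(string) > width:
        index = width - 1
        while not string[index].isspace():
            index = index - 1
        substrings.append(string[0:index])
        string = string[index + 1:]
    substrings.append(string)
    return substrings

print(word_wrap("the quick brown fox jumps over the lazy dog", width=15))
# ['the quick', 'brown fox', 'jumps over the', 'lazy dog']

One edge case worth noting: a single token longer than `width` sends `index` negative, so the cut lands near the end of the string via negative indexing, and with no whitespace anywhere in the text it eventually raises IndexError.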
243,753
zorg/zorg-emic
zorg_emic/emic2.py
Emic2.speak
def speak(self, text): """ The main function to convert text into speech. """ if not self.is_valid_string(text): raise Exception("%s is not ISO-8859-1 compatible." % (text)) # Maximum allowable 1023 characters per message if len(text) > 1023: lines = self.word_wrap(text, width=1023) for line in lines: self.queue.put("S%s" % (line)) else: self.queue.put("S%s" % (text))
python
def speak(self, text): """ The main function to convert text into speech. """ if not self.is_valid_string(text): raise Exception("%s is not ISO-8859-1 compatible." % (text)) # Maximum allowable 1023 characters per message if len(text) > 1023: lines = self.word_wrap(text, width=1023) for line in lines: self.queue.put("S%s" % (line)) else: self.queue.put("S%s" % (text))
[ "def", "speak", "(", "self", ",", "text", ")", ":", "if", "not", "self", ".", "is_valid_string", "(", "text", ")", ":", "raise", "Exception", "(", "\"%s is not ISO-8859-1 compatible.\"", "%", "(", "text", ")", ")", "# Maximum allowable 1023 characters per message", "if", "len", "(", "text", ")", ">", "1023", ":", "lines", "=", "self", ".", "word_wrap", "(", "text", ",", "width", "=", "1023", ")", "for", "line", "in", "lines", ":", "self", ".", "queue", ".", "put", "(", "\"S%s\"", "%", "(", "line", ")", ")", "else", ":", "self", ".", "queue", ".", "put", "(", "\"S%s\"", "%", "(", "text", ")", ")" ]
The main function to convert text into speech.
[ "The", "main", "function", "to", "convert", "text", "into", "speech", "." ]
34d49897131cf7773b2b0f46e1e0a796911144e3
https://github.com/zorg/zorg-emic/blob/34d49897131cf7773b2b0f46e1e0a796911144e3/zorg_emic/emic2.py#L88-L101
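A standalone illustration of the framing `speak()` performs: oversized text is split and each chunk is queued with the Emic 2 `S` (say) command prefix. Here `textwrap.wrap` stands in for the class's own `word_wrap`:

import queue
import textwrap

q = queue.Queue()
text = "hello " * 300  # 1800 chars, past the 1023-character limit

for chunk in textwrap.wrap(text, width=1023):  # stand-in for word_wrap
    q.put("S%s" % chunk)  # "S" is the Emic 2 say command

print(q.qsize())  # 2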
243,754
xtream1101/web-wrapper
web_wrapper/selenium_utils.py
SeleniumUtils._get_site
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs): """ Try and return page content in the requested format using selenium """ try: # **TODO**: Find what exception this will throw and catch it and call # self.driver.execute_script("window.stop()") # Then still try and get the source from the page self.driver.set_page_load_timeout(timeout) self.driver.get(url) header_data = self.get_selenium_header() status_code = header_data['status-code'] # Set data to access from script self.status_code = status_code self.url = self.driver.current_url except TimeoutException: logger.warning("Page timeout: {}".format(url)) try: scraper_monitor.failed_url(url, 'Timeout') except (NameError, AttributeError): # Happens when scraper_monitor is not being used/setup pass except Exception: logger.exception("Unknown problem with scraper_monitor sending a failed url") except Exception as e: raise e.with_traceback(sys.exc_info()[2]) else: # If an exception was not thrown then check the http status code if status_code < 400: # If the http status code is not an error return self.driver.page_source else: # If http status code is 400 or greater raise SeleniumHTTPError("Status code >= 400", status_code=status_code)
python
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs): """ Try and return page content in the requested format using selenium """ try: # **TODO**: Find what exception this will throw and catch it and call # self.driver.execute_script("window.stop()") # Then still try and get the source from the page self.driver.set_page_load_timeout(timeout) self.driver.get(url) header_data = self.get_selenium_header() status_code = header_data['status-code'] # Set data to access from script self.status_code = status_code self.url = self.driver.current_url except TimeoutException: logger.warning("Page timeout: {}".format(url)) try: scraper_monitor.failed_url(url, 'Timeout') except (NameError, AttributeError): # Happens when scraper_monitor is not being used/setup pass except Exception: logger.exception("Unknown problem with scraper_monitor sending a failed url") except Exception as e: raise e.with_traceback(sys.exc_info()[2]) else: # If an exception was not thrown then check the http status code if status_code < 400: # If the http status code is not an error return self.driver.page_source else: # If http status code is 400 or greater raise SeleniumHTTPError("Status code >= 400", status_code=status_code)
[ "def", "_get_site", "(", "self", ",", "url", ",", "headers", ",", "cookies", ",", "timeout", ",", "driver_args", ",", "driver_kwargs", ")", ":", "try", ":", "# **TODO**: Find what exception this will throw and catch it and call", "# self.driver.execute_script(\"window.stop()\")", "# Then still try and get the source from the page", "self", ".", "driver", ".", "set_page_load_timeout", "(", "timeout", ")", "self", ".", "driver", ".", "get", "(", "url", ")", "header_data", "=", "self", ".", "get_selenium_header", "(", ")", "status_code", "=", "header_data", "[", "'status-code'", "]", "# Set data to access from script", "self", ".", "status_code", "=", "status_code", "self", ".", "url", "=", "self", ".", "driver", ".", "current_url", "except", "TimeoutException", ":", "logger", ".", "warning", "(", "\"Page timeout: {}\"", ".", "format", "(", "url", ")", ")", "try", ":", "scraper_monitor", ".", "failed_url", "(", "url", ",", "'Timeout'", ")", "except", "(", "NameError", ",", "AttributeError", ")", ":", "# Happens when scraper_monitor is not being used/setup", "pass", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Unknown problem with scraper_monitor sending a failed url\"", ")", "except", "Exception", "as", "e", ":", "raise", "e", ".", "with_traceback", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "else", ":", "# If an exception was not thrown then check the http status code", "if", "status_code", "<", "400", ":", "# If the http status code is not an error", "return", "self", ".", "driver", ".", "page_source", "else", ":", "# If http status code is 400 or greater", "raise", "SeleniumHTTPError", "(", "\"Status code >= 400\"", ",", "status_code", "=", "status_code", ")" ]
Try and return page content in the requested format using selenium
[ "Try", "and", "return", "page", "content", "in", "the", "requested", "format", "using", "selenium" ]
2bfc63caa7d316564088951f01a490db493ea240
https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/selenium_utils.py#L63-L101
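The tail of the method isolates a small convention: Selenium exposes no HTTP status, so the wrapper obtains one via `get_selenium_header()` and raises a custom error at >= 400. A standalone rendering of that check (the exception class here is a stand-in matching how the method raises it; the real one lives elsewhere in web_wrapper):

class SeleniumHTTPError(IOError):
    # Stand-in: carries the status code alongside the message.
    def __init__(self, message, status_code=None):
        super(SeleniumHTTPError, self).__init__(message)
        self.status_code = status_code

def check_response(status_code, page_source):
    if status_code < 400:
        return page_source
    raise SeleniumHTTPError("Status code >= 400", status_code=status_code)

print(check_response(200, "<html>ok</html>"))  # returns the source
# check_response(404, "") would raise SeleniumHTTPError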
243,755
delfick/aws_syncr
aws_syncr/option_spec/encryption_keys.py
EncryptionKeys.sync_one
def sync_one(self, aws_syncr, amazon, key): """Make sure this key is as defined""" key_info = amazon.kms.key_info(key.name, key.location) if not key_info: amazon.kms.create_key(key.name, key.description, key.location, key.grant, key.policy.document) else: amazon.kms.modify_key(key_info, key.name, key.description, key.location, key.grant, key.policy.document)
python
def sync_one(self, aws_syncr, amazon, key): """Make sure this key is as defined""" key_info = amazon.kms.key_info(key.name, key.location) if not key_info: amazon.kms.create_key(key.name, key.description, key.location, key.grant, key.policy.document) else: amazon.kms.modify_key(key_info, key.name, key.description, key.location, key.grant, key.policy.document)
[ "def", "sync_one", "(", "self", ",", "aws_syncr", ",", "amazon", ",", "key", ")", ":", "key_info", "=", "amazon", ".", "kms", ".", "key_info", "(", "key", ".", "name", ",", "key", ".", "location", ")", "if", "not", "key_info", ":", "amazon", ".", "kms", ".", "create_key", "(", "key", ".", "name", ",", "key", ".", "description", ",", "key", ".", "location", ",", "key", ".", "grant", ",", "key", ".", "policy", ".", "document", ")", "else", ":", "amazon", ".", "kms", ".", "modify_key", "(", "key_info", ",", "key", ".", "name", ",", "key", ".", "description", ",", "key", ".", "location", ",", "key", ".", "grant", ",", "key", ".", "policy", ".", "document", ")" ]
Make sure this key is as defined
[ "Make", "sure", "this", "key", "is", "as", "defined" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/option_spec/encryption_keys.py#L50-L56
243,756
collectiveacuity/labPack
labpack/storage/dropbox.py
dropboxClient._walk
def _walk(self, root_path=''): ''' an iterator method which walks the file structure of the dropbox collection ''' title = '%s._walk' % self.__class__.__name__ if root_path: root_path = '/%s' % root_path try: response = self.dropbox.files_list_folder(path=root_path, recursive=True) for record in response.entries: if not isinstance(record, self.objects.FileMetadata): continue yield record.path_display[1:] if response.has_more: while response.has_more: response = self.dropbox.files_list_folder_continue(response.cursor) for record in response.entries: if not isinstance(record, self.objects.FileMetadata): continue yield record.path_display[1:] except: raise DropboxConnectionError(title)
python
def _walk(self, root_path=''): ''' an iterator method which walks the file structure of the dropbox collection ''' title = '%s._walk' % self.__class__.__name__ if root_path: root_path = '/%s' % root_path try: response = self.dropbox.files_list_folder(path=root_path, recursive=True) for record in response.entries: if not isinstance(record, self.objects.FileMetadata): continue yield record.path_display[1:] if response.has_more: while response.has_more: response = self.dropbox.files_list_folder_continue(response.cursor) for record in response.entries: if not isinstance(record, self.objects.FileMetadata): continue yield record.path_display[1:] except: raise DropboxConnectionError(title)
[ "def", "_walk", "(", "self", ",", "root_path", "=", "''", ")", ":", "title", "=", "'%s._walk'", "%", "self", ".", "__class__", ".", "__name__", "if", "root_path", ":", "root_path", "=", "'/%s'", "%", "root_path", "try", ":", "response", "=", "self", ".", "dropbox", ".", "files_list_folder", "(", "path", "=", "root_path", ",", "recursive", "=", "True", ")", "for", "record", "in", "response", ".", "entries", ":", "if", "not", "isinstance", "(", "record", ",", "self", ".", "objects", ".", "FileMetadata", ")", ":", "continue", "yield", "record", ".", "path_display", "[", "1", ":", "]", "if", "response", ".", "has_more", ":", "while", "response", ".", "has_more", ":", "response", "=", "self", ".", "dropbox", ".", "files_list_folder_continue", "(", "response", ".", "cursor", ")", "for", "record", "in", "response", ".", "entries", ":", "if", "not", "isinstance", "(", "record", ",", "self", ".", "objects", ".", "FileMetadata", ")", ":", "continue", "yield", "record", ".", "path_display", "[", "1", ":", "]", "except", ":", "raise", "DropboxConnectionError", "(", "title", ")" ]
an iterator method which walks the file structure of the dropbox collection
[ "an", "iterator", "method", "which", "walks", "the", "file", "structure", "of", "the", "dropbox", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/dropbox.py#L277-L296
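The pagination idiom inside `_walk` is the official `dropbox` SDK's cursor protocol (which `self.dropbox` wraps); the same loop against the SDK directly, with a placeholder token:

import dropbox

dbx = dropbox.Dropbox('ACCESS_TOKEN')  # placeholder token
result = dbx.files_list_folder(path='', recursive=True)
while True:
    for entry in result.entries:
        # Folders come back too; FileMetadata marks actual files.
        if isinstance(entry, dropbox.files.FileMetadata):
            print(entry.path_display[1:])  # drop the leading '/'
    if not result.has_more:
        break
    result = dbx.files_list_folder_continue(result.cursor)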
243,757
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/monitor.py
_read_stdin
def _read_stdin(): """ Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in python waits, until the buffer is big enough, or until EOF character is sent. This functions yields immediately after each line. """ line = sys.stdin.readline() while line: yield line line = sys.stdin.readline()
python
def _read_stdin(): """ Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in python waits, until the buffer is big enough, or until EOF character is sent. This functions yields immediately after each line. """ line = sys.stdin.readline() while line: yield line line = sys.stdin.readline()
[ "def", "_read_stdin", "(", ")", ":", "line", "=", "sys", ".", "stdin", ".", "readline", "(", ")", "while", "line", ":", "yield", "line", "line", "=", "sys", ".", "stdin", ".", "readline", "(", ")" ]
Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in Python wait until the buffer is big enough, or until an EOF character is sent. This function yields immediately after each line.
[ "Generator", "for", "reading", "from", "standard", "input", "in", "nonblocking", "mode", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/monitor.py#L36-L48
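Worth noting: the stdlib's two-argument `iter()` expresses the same loop, since `readline()` returns `''` only at EOF. A sketch:

import sys

# iter(callable, sentinel) keeps calling readline() until it returns ''
# (EOF), handing back each line as soon as it arrives -- the same
# contract as _read_stdin() above.
for line in iter(sys.stdin.readline, ''):
    print("line:", line.rstrip())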
243,758
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/monitor.py
_parse_line
def _parse_line(line): """ Convert one line from the extended log to dict. Args: line (str): Line which will be converted. Returns: dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` \ keys. Note: Typical line looks like this:: /home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777 Filename may contain ``,`` character, so I am ``rsplitting`` the line from the end to the beginning. """ line, timestamp = line.rsplit(",", 1) line, command = line.rsplit(",", 1) path, username = line.rsplit(",", 1) return { "timestamp": timestamp.strip(), "command": command.strip(), "username": username.strip(), "path": path, }
python
def _parse_line(line): """ Convert one line from the extended log to dict. Args: line (str): Line which will be converted. Returns: dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` \ keys. Note: Typical line looks like this:: /home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777 Filename may contain ``,`` character, so I am ``rsplitting`` the line from the end to the beginning. """ line, timestamp = line.rsplit(",", 1) line, command = line.rsplit(",", 1) path, username = line.rsplit(",", 1) return { "timestamp": timestamp.strip(), "command": command.strip(), "username": username.strip(), "path": path, }
[ "def", "_parse_line", "(", "line", ")", ":", "line", ",", "timestamp", "=", "line", ".", "rsplit", "(", "\",\"", ",", "1", ")", "line", ",", "command", "=", "line", ".", "rsplit", "(", "\",\"", ",", "1", ")", "path", ",", "username", "=", "line", ".", "rsplit", "(", "\",\"", ",", "1", ")", "return", "{", "\"timestamp\"", ":", "timestamp", ".", "strip", "(", ")", ",", "\"command\"", ":", "command", ".", "strip", "(", ")", ",", "\"username\"", ":", "username", ".", "strip", "(", ")", ",", "\"path\"", ":", "path", ",", "}" ]
Convert one line from the extended log to dict. Args: line (str): Line which will be converted. Returns: dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` \ keys. Note: Typical line looks like this:: /home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777 Filename may contain a ``,`` character, so I am ``rsplitting`` the line from the end to the beginning.
[ "Convert", "one", "line", "from", "the", "extended", "log", "to", "dict", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/monitor.py#L51-L79
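Running the docstring's sample line through the same right-to-left splits shows why `rsplit` is used: commas inside the filename survive intact.

line = "/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777"

line, timestamp = line.rsplit(",", 1)
line, command = line.rsplit(",", 1)
path, username = line.rsplit(",", 1)

print(path)               # /home/ftp/xex/asd bsd.dat
print(username.strip())   # xex
print(command.strip())    # STOR
print(timestamp.strip())  # 1398351777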
243,759
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/monitor.py
process_log
def process_log(file_iterator): """ Process the extended ProFTPD log. Args: file_iterator (file): any file-like iterator for reading the log or stdin (see :func:`_read_stdin`). Yields: ImportRequest: with each import. """ for line in file_iterator: if "," not in line: continue parsed = _parse_line(line) if not parsed["command"].upper() in ["DELE", "DEL"]: continue # don't react to anything else, than trigger in form of deleted # "lock" file if os.path.basename(parsed["path"]) != settings.LOCK_FILENAME: continue # react only to lock file in in home directory dir_name = os.path.dirname(parsed["path"]) if settings.LOCK_ONLY_IN_HOME: if dir_name != settings.DATA_PATH + parsed["username"]: continue # deleted user if not os.path.exists(os.path.dirname(parsed["path"])): continue # old record, which doesn't need to be parsed again if os.path.exists(parsed["path"]): continue logger.info( "Request for processing from user '%s'." % parsed["username"] ) yield process_import_request( username=parsed["username"], path=os.path.dirname(parsed["path"]), timestamp=parsed["timestamp"], logger_handler=logger )
python
def process_log(file_iterator): """ Process the extended ProFTPD log. Args: file_iterator (file): any file-like iterator for reading the log or stdin (see :func:`_read_stdin`). Yields: ImportRequest: with each import. """ for line in file_iterator: if "," not in line: continue parsed = _parse_line(line) if not parsed["command"].upper() in ["DELE", "DEL"]: continue # don't react to anything else, than trigger in form of deleted # "lock" file if os.path.basename(parsed["path"]) != settings.LOCK_FILENAME: continue # react only to lock file in in home directory dir_name = os.path.dirname(parsed["path"]) if settings.LOCK_ONLY_IN_HOME: if dir_name != settings.DATA_PATH + parsed["username"]: continue # deleted user if not os.path.exists(os.path.dirname(parsed["path"])): continue # old record, which doesn't need to be parsed again if os.path.exists(parsed["path"]): continue logger.info( "Request for processing from user '%s'." % parsed["username"] ) yield process_import_request( username=parsed["username"], path=os.path.dirname(parsed["path"]), timestamp=parsed["timestamp"], logger_handler=logger )
[ "def", "process_log", "(", "file_iterator", ")", ":", "for", "line", "in", "file_iterator", ":", "if", "\",\"", "not", "in", "line", ":", "continue", "parsed", "=", "_parse_line", "(", "line", ")", "if", "not", "parsed", "[", "\"command\"", "]", ".", "upper", "(", ")", "in", "[", "\"DELE\"", ",", "\"DEL\"", "]", ":", "continue", "# don't react to anything else, than trigger in form of deleted", "# \"lock\" file", "if", "os", ".", "path", ".", "basename", "(", "parsed", "[", "\"path\"", "]", ")", "!=", "settings", ".", "LOCK_FILENAME", ":", "continue", "# react only to lock file in in home directory", "dir_name", "=", "os", ".", "path", ".", "dirname", "(", "parsed", "[", "\"path\"", "]", ")", "if", "settings", ".", "LOCK_ONLY_IN_HOME", ":", "if", "dir_name", "!=", "settings", ".", "DATA_PATH", "+", "parsed", "[", "\"username\"", "]", ":", "continue", "# deleted user", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "parsed", "[", "\"path\"", "]", ")", ")", ":", "continue", "# old record, which doesn't need to be parsed again", "if", "os", ".", "path", ".", "exists", "(", "parsed", "[", "\"path\"", "]", ")", ":", "continue", "logger", ".", "info", "(", "\"Request for processing from user '%s'.\"", "%", "parsed", "[", "\"username\"", "]", ")", "yield", "process_import_request", "(", "username", "=", "parsed", "[", "\"username\"", "]", ",", "path", "=", "os", ".", "path", ".", "dirname", "(", "parsed", "[", "\"path\"", "]", ")", ",", "timestamp", "=", "parsed", "[", "\"timestamp\"", "]", ",", "logger_handler", "=", "logger", ")" ]
Process the extended ProFTPD log. Args: file_iterator (file): any file-like iterator for reading the log or stdin (see :func:`_read_stdin`). Yields: ImportRequest: with each import.
[ "Process", "the", "extended", "ProFTPD", "log", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/monitor.py#L82-L130
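A hedged smoke test for the generator: the path, user, and lock-file name below are invented, and on a real deployment a yield also depends on filesystem state (the lock file's directory must exist and the lock file itself must already be gone), so outside that environment this prints nothing:

log_lines = [
    # path of the deleted lock file, username, DELE command, timestamp;
    # the basename must equal settings.LOCK_FILENAME to trigger an import.
    "/home/ftp/xex/delete_me_to_import, xex, DELE, 1398351777\n",
]

for import_request in process_log(iter(log_lines)):
    print(import_request)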
243,760
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/monitor.py
main
def main(filename): """ Open `filename` and start processing it line by line. If `filename` is none, process lines from `stdin`. """ if filename: if not os.path.exists(filename): logger.error("'%s' doesn't exists!" % filename) sys.stderr.write("'%s' doesn't exists!\n" % filename) sys.exit(1) logger.info("Processing '%s'" % filename) for ir in process_log(sh.tail("-f", filename, _iter=True)): print ir else: logger.info("Processing stdin.") for ir in process_log(_read_stdin()): print ir
python
def main(filename): """ Open `filename` and start processing it line by line. If `filename` is none, process lines from `stdin`. """ if filename: if not os.path.exists(filename): logger.error("'%s' doesn't exists!" % filename) sys.stderr.write("'%s' doesn't exists!\n" % filename) sys.exit(1) logger.info("Processing '%s'" % filename) for ir in process_log(sh.tail("-f", filename, _iter=True)): print ir else: logger.info("Processing stdin.") for ir in process_log(_read_stdin()): print ir
[ "def", "main", "(", "filename", ")", ":", "if", "filename", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "logger", ".", "error", "(", "\"'%s' doesn't exists!\"", "%", "filename", ")", "sys", ".", "stderr", ".", "write", "(", "\"'%s' doesn't exists!\\n\"", "%", "filename", ")", "sys", ".", "exit", "(", "1", ")", "logger", ".", "info", "(", "\"Processing '%s'\"", "%", "filename", ")", "for", "ir", "in", "process_log", "(", "sh", ".", "tail", "(", "\"-f\"", ",", "filename", ",", "_iter", "=", "True", ")", ")", ":", "print", "ir", "else", ":", "logger", ".", "info", "(", "\"Processing stdin.\"", ")", "for", "ir", "in", "process_log", "(", "_read_stdin", "(", ")", ")", ":", "print", "ir" ]
Open `filename` and start processing it line by line. If `filename` is None, process lines from `stdin`.
[ "Open", "filename", "and", "start", "processing", "it", "line", "by", "line", ".", "If", "filename", "is", "none", "process", "lines", "from", "stdin", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/monitor.py#L133-L150
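Typical ways to drive this entry point (the log path is illustrative, and the argument wiring that calls main() lives outside this record):

# follow the extended log directly (sh.tail -f under the hood):
#   python monitor.py /var/log/proftpd/extended.log
#
# or stream it over stdin, which routes through _read_stdin():
#   tail -f /var/log/proftpd/extended.log | python monitor.py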
243,761
sassoo/goldman
goldman/serializers/base.py
Serializer.serialize
def serialize(self, data): """ Invoke the serializer These are common things for all serializers. Mostly, stuff to do with managing headers. The data passed in may not be reliable for much of anything. Conditionally, set the Content-Type header unless it has already been set. """ if not self.resp.content_type: self.resp.set_header('Content-Type', getattr(self, 'MIMETYPE'))
python
def serialize(self, data): """ Invoke the serializer These are common things for all serializers. Mostly, stuff to do with managing headers. The data passed in may not be reliable for much of anything. Conditionally, set the Content-Type header unless it has already been set. """ if not self.resp.content_type: self.resp.set_header('Content-Type', getattr(self, 'MIMETYPE'))
[ "def", "serialize", "(", "self", ",", "data", ")", ":", "if", "not", "self", ".", "resp", ".", "content_type", ":", "self", ".", "resp", ".", "set_header", "(", "'Content-Type'", ",", "getattr", "(", "self", ",", "'MIMETYPE'", ")", ")" ]
Invoke the serializer These are common things for all serializers. Mostly, stuff to do with managing headers. The data passed in may not be reliable for much of anything. Conditionally, set the Content-Type header unless it has already been set.
[ "Invoke", "the", "serializer" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/base.py#L21-L33
243,762
TC01/calcpkg
calcrepo/info.py
FileInfo.printData
def printData(self, output = sys.stdout): """Output all the file data to be written to any writable output""" self.printDatum("Name : ", self.fileName, output) self.printDatum("Author : ", self.author, output) self.printDatum("Repository : ", self.repository, output) self.printDatum("Category : ", self.category, output) self.printDatum("Downloads : ", self.downloads, output) self.printDatum("Date Uploaded : ", self.fileDate, output) self.printDatum("File Size : ", self.fileSize, output) self.printDatum("Documentation : ", self.documentation, output) self.printDatum("Source Code : ", self.sourceCode, output) self.printDatum("Description : ", self.description, output) # print("\n", output) print >> output, "\n\n"
python
def printData(self, output = sys.stdout): """Output all the file data to be written to any writable output""" self.printDatum("Name : ", self.fileName, output) self.printDatum("Author : ", self.author, output) self.printDatum("Repository : ", self.repository, output) self.printDatum("Category : ", self.category, output) self.printDatum("Downloads : ", self.downloads, output) self.printDatum("Date Uploaded : ", self.fileDate, output) self.printDatum("File Size : ", self.fileSize, output) self.printDatum("Documentation : ", self.documentation, output) self.printDatum("Source Code : ", self.sourceCode, output) self.printDatum("Description : ", self.description, output) # print("\n", output) print >> output, "\n\n"
[ "def", "printData", "(", "self", ",", "output", "=", "sys", ".", "stdout", ")", ":", "self", ".", "printDatum", "(", "\"Name : \"", ",", "self", ".", "fileName", ",", "output", ")", "self", ".", "printDatum", "(", "\"Author : \"", ",", "self", ".", "author", ",", "output", ")", "self", ".", "printDatum", "(", "\"Repository : \"", ",", "self", ".", "repository", ",", "output", ")", "self", ".", "printDatum", "(", "\"Category : \"", ",", "self", ".", "category", ",", "output", ")", "self", ".", "printDatum", "(", "\"Downloads : \"", ",", "self", ".", "downloads", ",", "output", ")", "self", ".", "printDatum", "(", "\"Date Uploaded : \"", ",", "self", ".", "fileDate", ",", "output", ")", "self", ".", "printDatum", "(", "\"File Size : \"", ",", "self", ".", "fileSize", ",", "output", ")", "self", ".", "printDatum", "(", "\"Documentation : \"", ",", "self", ".", "documentation", ",", "output", ")", "self", ".", "printDatum", "(", "\"Source Code : \"", ",", "self", ".", "sourceCode", ",", "output", ")", "self", ".", "printDatum", "(", "\"Description : \"", ",", "self", ".", "description", ",", "output", ")", "#\t\tprint(\"\\n\", output)", "print", ">>", "output", ",", "\"\\n\\n\"" ]
Output all the file data to be written to any writable output
[ "Output", "all", "the", "file", "data", "to", "be", "written", "to", "any", "writable", "output" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/info.py#L58-L71
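One portability note on the record above: `print >> output, "\n\n"` is Python 2 print-chevron syntax (the commented-out line just before it hints at an attempted port). The Python 3 equivalent of that final write would be:

print("\n\n", file=output)  # writes "\n\n" plus print's own trailing newline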
243,763
ronaldguillen/wave
wave/serializers.py
raise_errors_on_nested_writes
def raise_errors_on_nested_writes(method_name, serializer, validated_data): """ Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance. """ # Ensure we don't have a writable nested field. For example: # # class UserSerializer(ModelSerializer): # ... # profile = ProfileSerializer() assert not any( isinstance(field, BaseSerializer) and (key in validated_data) and isinstance(validated_data[key], (list, dict)) for key, field in serializer.fields.items() ), ( 'The `.{method_name}()` method does not support writable nested' 'fields by default.\nWrite an explicit `.{method_name}()` method for ' 'serializer `{module}.{class_name}`, or set `read_only=True` on ' 'nested serializer fields.'.format( method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__ ) ) # Ensure we don't have a writable dotted-source field. For example: # # class UserSerializer(ModelSerializer): # ... # address = serializer.CharField('profile.address') assert not any( '.' in field.source and (key in validated_data) and isinstance(validated_data[key], (list, dict)) for key, field in serializer.fields.items() ), ( 'The `.{method_name}()` method does not support writable dotted-source ' 'fields by default.\nWrite an explicit `.{method_name}()` method for ' 'serializer `{module}.{class_name}`, or set `read_only=True` on ' 'dotted-source serializer fields.'.format( method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__ ) )
python
def raise_errors_on_nested_writes(method_name, serializer, validated_data): """ Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance. """ # Ensure we don't have a writable nested field. For example: # # class UserSerializer(ModelSerializer): # ... # profile = ProfileSerializer() assert not any( isinstance(field, BaseSerializer) and (key in validated_data) and isinstance(validated_data[key], (list, dict)) for key, field in serializer.fields.items() ), ( 'The `.{method_name}()` method does not support writable nested' 'fields by default.\nWrite an explicit `.{method_name}()` method for ' 'serializer `{module}.{class_name}`, or set `read_only=True` on ' 'nested serializer fields.'.format( method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__ ) ) # Ensure we don't have a writable dotted-source field. For example: # # class UserSerializer(ModelSerializer): # ... # address = serializer.CharField('profile.address') assert not any( '.' in field.source and (key in validated_data) and isinstance(validated_data[key], (list, dict)) for key, field in serializer.fields.items() ), ( 'The `.{method_name}()` method does not support writable dotted-source ' 'fields by default.\nWrite an explicit `.{method_name}()` method for ' 'serializer `{module}.{class_name}`, or set `read_only=True` on ' 'dotted-source serializer fields.'.format( method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__ ) )
[ "def", "raise_errors_on_nested_writes", "(", "method_name", ",", "serializer", ",", "validated_data", ")", ":", "# Ensure we don't have a writable nested field. For example:", "#", "# class UserSerializer(ModelSerializer):", "# ...", "# profile = ProfileSerializer()", "assert", "not", "any", "(", "isinstance", "(", "field", ",", "BaseSerializer", ")", "and", "(", "key", "in", "validated_data", ")", "and", "isinstance", "(", "validated_data", "[", "key", "]", ",", "(", "list", ",", "dict", ")", ")", "for", "key", ",", "field", "in", "serializer", ".", "fields", ".", "items", "(", ")", ")", ",", "(", "'The `.{method_name}()` method does not support writable nested'", "'fields by default.\\nWrite an explicit `.{method_name}()` method for '", "'serializer `{module}.{class_name}`, or set `read_only=True` on '", "'nested serializer fields.'", ".", "format", "(", "method_name", "=", "method_name", ",", "module", "=", "serializer", ".", "__class__", ".", "__module__", ",", "class_name", "=", "serializer", ".", "__class__", ".", "__name__", ")", ")", "# Ensure we don't have a writable dotted-source field. For example:", "#", "# class UserSerializer(ModelSerializer):", "# ...", "# address = serializer.CharField('profile.address')", "assert", "not", "any", "(", "'.'", "in", "field", ".", "source", "and", "(", "key", "in", "validated_data", ")", "and", "isinstance", "(", "validated_data", "[", "key", "]", ",", "(", "list", ",", "dict", ")", ")", "for", "key", ",", "field", "in", "serializer", ".", "fields", ".", "items", "(", ")", ")", ",", "(", "'The `.{method_name}()` method does not support writable dotted-source '", "'fields by default.\\nWrite an explicit `.{method_name}()` method for '", "'serializer `{module}.{class_name}`, or set `read_only=True` on '", "'dotted-source serializer fields.'", ".", "format", "(", "method_name", "=", "method_name", ",", "module", "=", "serializer", ".", "__class__", ".", "__module__", ",", "class_name", "=", "serializer", ".", "__class__", ".", "__name__", ")", ")" ]
Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance.
[ "Give", "explicit", "errors", "when", "users", "attempt", "to", "pass", "writable", "nested", "data", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L688-L747
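The shape of serializer the assertions above reject, sketched in DRF style (wave tracks the rest_framework API, so the import path is an assumption and the snippet is not meant to run outside a configured project):

from wave import serializers  # assumed to mirror rest_framework.serializers

class ProfileSerializer(serializers.Serializer):
    address = serializers.CharField()

class UserSerializer(serializers.Serializer):
    username = serializers.CharField()
    profile = ProfileSerializer()  # writable nested field

# UserSerializer(data=...).save() with nested profile data trips the first
# assertion unless create()/update() are overridden, or the nested field
# is declared with read_only=True.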
243,764
ronaldguillen/wave
wave/serializers.py
BaseSerializer.many_init
def many_init(cls, *args, **kwargs): """ This method implements the creation of a `ListSerializer` parent class when `many=True` is used. You can customize it if you need to control which keyword arguments are passed to the parent, and which are passed to the child. Note that we're over-cautious in passing most arguments to both parent and child classes in order to try to cover the general case. If you're overriding this method you'll probably want something much simpler, eg: @classmethod def many_init(cls, *args, **kwargs): kwargs['child'] = cls() return CustomListSerializer(*args, **kwargs) """ allow_empty = kwargs.pop('allow_empty', None) child_serializer = cls(*args, **kwargs) list_kwargs = { 'child': child_serializer, } if allow_empty is not None: list_kwargs['allow_empty'] = allow_empty list_kwargs.update({ key: value for key, value in kwargs.items() if key in LIST_SERIALIZER_KWARGS }) meta = getattr(cls, 'Meta', None) list_serializer_class = getattr(meta, 'list_serializer_class', ListSerializer) return list_serializer_class(*args, **list_kwargs)
python
def many_init(cls, *args, **kwargs): """ This method implements the creation of a `ListSerializer` parent class when `many=True` is used. You can customize it if you need to control which keyword arguments are passed to the parent, and which are passed to the child. Note that we're over-cautious in passing most arguments to both parent and child classes in order to try to cover the general case. If you're overriding this method you'll probably want something much simpler, eg: @classmethod def many_init(cls, *args, **kwargs): kwargs['child'] = cls() return CustomListSerializer(*args, **kwargs) """ allow_empty = kwargs.pop('allow_empty', None) child_serializer = cls(*args, **kwargs) list_kwargs = { 'child': child_serializer, } if allow_empty is not None: list_kwargs['allow_empty'] = allow_empty list_kwargs.update({ key: value for key, value in kwargs.items() if key in LIST_SERIALIZER_KWARGS }) meta = getattr(cls, 'Meta', None) list_serializer_class = getattr(meta, 'list_serializer_class', ListSerializer) return list_serializer_class(*args, **list_kwargs)
[ "def", "many_init", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "allow_empty", "=", "kwargs", ".", "pop", "(", "'allow_empty'", ",", "None", ")", "child_serializer", "=", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")", "list_kwargs", "=", "{", "'child'", ":", "child_serializer", ",", "}", "if", "allow_empty", "is", "not", "None", ":", "list_kwargs", "[", "'allow_empty'", "]", "=", "allow_empty", "list_kwargs", ".", "update", "(", "{", "key", ":", "value", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", "if", "key", "in", "LIST_SERIALIZER_KWARGS", "}", ")", "meta", "=", "getattr", "(", "cls", ",", "'Meta'", ",", "None", ")", "list_serializer_class", "=", "getattr", "(", "meta", ",", "'list_serializer_class'", ",", "ListSerializer", ")", "return", "list_serializer_class", "(", "*", "args", ",", "*", "*", "list_kwargs", ")" ]
This method implements the creation of a `ListSerializer` parent class when `many=True` is used. You can customize it if you need to control which keyword arguments are passed to the parent, and which are passed to the child. Note that we're over-cautious in passing most arguments to both parent and child classes in order to try to cover the general case. If you're overriding this method you'll probably want something much simpler, eg: @classmethod def many_init(cls, *args, **kwargs): kwargs['child'] = cls() return CustomListSerializer(*args, **kwargs)
[ "This", "method", "implements", "the", "creation", "of", "a", "ListSerializer", "parent", "class", "when", "many", "=", "True", "is", "used", ".", "You", "can", "customize", "it", "if", "you", "need", "to", "control", "which", "keyword", "arguments", "are", "passed", "to", "the", "parent", "and", "which", "are", "passed", "to", "the", "child", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L105-L134
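What `many_init` buys in practice: instantiating with `many=True` hands back the list wrapper rather than the child serializer (same API assumption as the previous sketch):

from wave import serializers  # assumed to mirror rest_framework.serializers

class BookSerializer(serializers.Serializer):
    title = serializers.CharField()

books = BookSerializer(data=[{'title': 'A'}, {'title': 'B'}], many=True)
print(type(books).__name__)  # ListSerializer

# A Meta.list_serializer_class on BookSerializer would swap a custom
# wrapper in here instead.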
243,765
ronaldguillen/wave
wave/serializers.py
ListSerializer.get_value
def get_value(self, dictionary): """ Given the input dictionary, return the field value. """ # We override the default field access in order to support # lists in HTML forms. if html.is_html_input(dictionary): return html.parse_html_list(dictionary, prefix=self.field_name) return dictionary.get(self.field_name, empty)
python
def get_value(self, dictionary): """ Given the input dictionary, return the field value. """ # We override the default field access in order to support # lists in HTML forms. if html.is_html_input(dictionary): return html.parse_html_list(dictionary, prefix=self.field_name) return dictionary.get(self.field_name, empty)
[ "def", "get_value", "(", "self", ",", "dictionary", ")", ":", "# We override the default field access in order to support", "# lists in HTML forms.", "if", "html", ".", "is_html_input", "(", "dictionary", ")", ":", "return", "html", ".", "parse_html_list", "(", "dictionary", ",", "prefix", "=", "self", ".", "field_name", ")", "return", "dictionary", ".", "get", "(", "self", ".", "field_name", ",", "empty", ")" ]
Given the input dictionary, return the field value.
[ "Given", "the", "input", "dictionary", "return", "the", "field", "value", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L537-L545
243,766
ronaldguillen/wave
wave/serializers.py
ModelSerializer.get_field_names
def get_field_names(self, declared_fields, info): """ Returns the list of all field names that should be created when instantiating this serializer class. This is based on the default set of fields, but also takes into account the `Meta.fields` or `Meta.exclude` options if they have been specified. """ fields = getattr(self.Meta, 'fields', None) exclude = getattr(self.Meta, 'exclude', None) if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)): raise TypeError( 'The `fields` option must be a list or tuple or "__all__". ' 'Got %s.' % type(fields).__name__ ) if exclude and not isinstance(exclude, (list, tuple)): raise TypeError( 'The `exclude` option must be a list or tuple. Got %s.' % type(exclude).__name__ ) assert not (fields and exclude), ( "Cannot set both 'fields' and 'exclude' options on " "serializer {serializer_class}.".format( serializer_class=self.__class__.__name__ ) ) if fields is None and exclude is None: warnings.warn( "Creating a ModelSerializer without either the 'fields' " "attribute or the 'exclude' attribute is pending deprecation " "since 3.3.0. Add an explicit fields = '__all__' to the " "{serializer_class} serializer.".format( serializer_class=self.__class__.__name__ ), PendingDeprecationWarning ) if fields == ALL_FIELDS: fields = None if fields is not None: # Ensure that all declared fields have also been included in the # `Meta.fields` option. # Do not require any fields that are declared a parent class, # in order to allow serializer subclasses to only include # a subset of fields. required_field_names = set(declared_fields) for cls in self.__class__.__bases__: required_field_names -= set(getattr(cls, '_declared_fields', [])) for field_name in required_field_names: assert field_name in fields, ( "The field '{field_name}' was declared on serializer " "{serializer_class}, but has not been included in the " "'fields' option.".format( field_name=field_name, serializer_class=self.__class__.__name__ ) ) return fields # Use the default set of field names if `Meta.fields` is not specified. fields = self.get_default_field_names(declared_fields, info) if exclude is not None: # If `Meta.exclude` is included, then remove those fields. for field_name in exclude: assert field_name in fields, ( "The field '{field_name}' was included on serializer " "{serializer_class} in the 'exclude' option, but does " "not match any model field.".format( field_name=field_name, serializer_class=self.__class__.__name__ ) ) fields.remove(field_name) return fields
python
def get_field_names(self, declared_fields, info): """ Returns the list of all field names that should be created when instantiating this serializer class. This is based on the default set of fields, but also takes into account the `Meta.fields` or `Meta.exclude` options if they have been specified. """ fields = getattr(self.Meta, 'fields', None) exclude = getattr(self.Meta, 'exclude', None) if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)): raise TypeError( 'The `fields` option must be a list or tuple or "__all__". ' 'Got %s.' % type(fields).__name__ ) if exclude and not isinstance(exclude, (list, tuple)): raise TypeError( 'The `exclude` option must be a list or tuple. Got %s.' % type(exclude).__name__ ) assert not (fields and exclude), ( "Cannot set both 'fields' and 'exclude' options on " "serializer {serializer_class}.".format( serializer_class=self.__class__.__name__ ) ) if fields is None and exclude is None: warnings.warn( "Creating a ModelSerializer without either the 'fields' " "attribute or the 'exclude' attribute is pending deprecation " "since 3.3.0. Add an explicit fields = '__all__' to the " "{serializer_class} serializer.".format( serializer_class=self.__class__.__name__ ), PendingDeprecationWarning ) if fields == ALL_FIELDS: fields = None if fields is not None: # Ensure that all declared fields have also been included in the # `Meta.fields` option. # Do not require any fields that are declared a parent class, # in order to allow serializer subclasses to only include # a subset of fields. required_field_names = set(declared_fields) for cls in self.__class__.__bases__: required_field_names -= set(getattr(cls, '_declared_fields', [])) for field_name in required_field_names: assert field_name in fields, ( "The field '{field_name}' was declared on serializer " "{serializer_class}, but has not been included in the " "'fields' option.".format( field_name=field_name, serializer_class=self.__class__.__name__ ) ) return fields # Use the default set of field names if `Meta.fields` is not specified. fields = self.get_default_field_names(declared_fields, info) if exclude is not None: # If `Meta.exclude` is included, then remove those fields. for field_name in exclude: assert field_name in fields, ( "The field '{field_name}' was included on serializer " "{serializer_class} in the 'exclude' option, but does " "not match any model field.".format( field_name=field_name, serializer_class=self.__class__.__name__ ) ) fields.remove(field_name) return fields
[ "def", "get_field_names", "(", "self", ",", "declared_fields", ",", "info", ")", ":", "fields", "=", "getattr", "(", "self", ".", "Meta", ",", "'fields'", ",", "None", ")", "exclude", "=", "getattr", "(", "self", ".", "Meta", ",", "'exclude'", ",", "None", ")", "if", "fields", "and", "fields", "!=", "ALL_FIELDS", "and", "not", "isinstance", "(", "fields", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'The `fields` option must be a list or tuple or \"__all__\". '", "'Got %s.'", "%", "type", "(", "fields", ")", ".", "__name__", ")", "if", "exclude", "and", "not", "isinstance", "(", "exclude", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'The `exclude` option must be a list or tuple. Got %s.'", "%", "type", "(", "exclude", ")", ".", "__name__", ")", "assert", "not", "(", "fields", "and", "exclude", ")", ",", "(", "\"Cannot set both 'fields' and 'exclude' options on \"", "\"serializer {serializer_class}.\"", ".", "format", "(", "serializer_class", "=", "self", ".", "__class__", ".", "__name__", ")", ")", "if", "fields", "is", "None", "and", "exclude", "is", "None", ":", "warnings", ".", "warn", "(", "\"Creating a ModelSerializer without either the 'fields' \"", "\"attribute or the 'exclude' attribute is pending deprecation \"", "\"since 3.3.0. Add an explicit fields = '__all__' to the \"", "\"{serializer_class} serializer.\"", ".", "format", "(", "serializer_class", "=", "self", ".", "__class__", ".", "__name__", ")", ",", "PendingDeprecationWarning", ")", "if", "fields", "==", "ALL_FIELDS", ":", "fields", "=", "None", "if", "fields", "is", "not", "None", ":", "# Ensure that all declared fields have also been included in the", "# `Meta.fields` option.", "# Do not require any fields that are declared a parent class,", "# in order to allow serializer subclasses to only include", "# a subset of fields.", "required_field_names", "=", "set", "(", "declared_fields", ")", "for", "cls", "in", "self", ".", "__class__", ".", "__bases__", ":", "required_field_names", "-=", "set", "(", "getattr", "(", "cls", ",", "'_declared_fields'", ",", "[", "]", ")", ")", "for", "field_name", "in", "required_field_names", ":", "assert", "field_name", "in", "fields", ",", "(", "\"The field '{field_name}' was declared on serializer \"", "\"{serializer_class}, but has not been included in the \"", "\"'fields' option.\"", ".", "format", "(", "field_name", "=", "field_name", ",", "serializer_class", "=", "self", ".", "__class__", ".", "__name__", ")", ")", "return", "fields", "# Use the default set of field names if `Meta.fields` is not specified.", "fields", "=", "self", ".", "get_default_field_names", "(", "declared_fields", ",", "info", ")", "if", "exclude", "is", "not", "None", ":", "# If `Meta.exclude` is included, then remove those fields.", "for", "field_name", "in", "exclude", ":", "assert", "field_name", "in", "fields", ",", "(", "\"The field '{field_name}' was included on serializer \"", "\"{serializer_class} in the 'exclude' option, but does \"", "\"not match any model field.\"", ".", "format", "(", "field_name", "=", "field_name", ",", "serializer_class", "=", "self", ".", "__class__", ".", "__name__", ")", ")", "fields", ".", "remove", "(", "field_name", ")", "return", "fields" ]
Returns the list of all field names that should be created when instantiating this serializer class. This is based on the default set of fields, but also takes into account the `Meta.fields` or `Meta.exclude` options if they have been specified.
[ "Returns", "the", "list", "of", "all", "field", "names", "that", "should", "be", "created", "when", "instantiating", "this", "serializer", "class", ".", "This", "is", "based", "on", "the", "default", "set", "of", "fields", "but", "also", "takes", "into", "account", "the", "Meta", ".", "fields", "or", "Meta", ".", "exclude", "options", "if", "they", "have", "been", "specified", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L958-L1039
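The Meta rules enforced above, in miniature; the model and field names are hypothetical and, as before, this assumes a configured project:

from wave import serializers  # assumed, as above

class AccountSerializer(serializers.ModelSerializer):
    class Meta:
        model = Account          # hypothetical model
        fields = '__all__'       # the ALL_FIELDS sentinel: explicit "everything"

class PublicAccountSerializer(serializers.ModelSerializer):
    class Meta:
        model = Account
        exclude = ('password',)  # exclude *instead of* fields
        # fields = ('id',)       # setting both raises the assertion above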
243,767
ronaldguillen/wave
wave/serializers.py
ModelSerializer.build_standard_field
def build_standard_field(self, field_name, model_field): """ Create regular model fields. """ field_mapping = ClassLookupDict(self.serializer_field_mapping) field_class = field_mapping[model_field] field_kwargs = get_field_kwargs(field_name, model_field) if 'choices' in field_kwargs: # Fields with choices get coerced into `ChoiceField` # instead of using their regular typed field. field_class = self.serializer_choice_field # Some model fields may introduce kwargs that would not be valid # for the choice field. We need to strip these out. # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES) valid_kwargs = set(( 'read_only', 'write_only', 'required', 'default', 'initial', 'source', 'label', 'help_text', 'style', 'error_messages', 'validators', 'allow_null', 'allow_blank', 'choices' )) for key in list(field_kwargs.keys()): if key not in valid_kwargs: field_kwargs.pop(key) if not issubclass(field_class, ModelField): # `model_field` is only valid for the fallback case of # `ModelField`, which is used when no other typed field # matched to the model field. field_kwargs.pop('model_field', None) if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField): # `allow_blank` is only valid for textual fields. field_kwargs.pop('allow_blank', None) if postgres_fields and isinstance(model_field, postgres_fields.ArrayField): # Populate the `child` argument on `ListField` instances generated # for the PostgrSQL specfic `ArrayField`. child_model_field = model_field.base_field child_field_class, child_field_kwargs = self.build_standard_field( 'child', child_model_field ) field_kwargs['child'] = child_field_class(**child_field_kwargs) return field_class, field_kwargs
python
def build_standard_field(self, field_name, model_field): """ Create regular model fields. """ field_mapping = ClassLookupDict(self.serializer_field_mapping) field_class = field_mapping[model_field] field_kwargs = get_field_kwargs(field_name, model_field) if 'choices' in field_kwargs: # Fields with choices get coerced into `ChoiceField` # instead of using their regular typed field. field_class = self.serializer_choice_field # Some model fields may introduce kwargs that would not be valid # for the choice field. We need to strip these out. # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES) valid_kwargs = set(( 'read_only', 'write_only', 'required', 'default', 'initial', 'source', 'label', 'help_text', 'style', 'error_messages', 'validators', 'allow_null', 'allow_blank', 'choices' )) for key in list(field_kwargs.keys()): if key not in valid_kwargs: field_kwargs.pop(key) if not issubclass(field_class, ModelField): # `model_field` is only valid for the fallback case of # `ModelField`, which is used when no other typed field # matched to the model field. field_kwargs.pop('model_field', None) if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField): # `allow_blank` is only valid for textual fields. field_kwargs.pop('allow_blank', None) if postgres_fields and isinstance(model_field, postgres_fields.ArrayField): # Populate the `child` argument on `ListField` instances generated # for the PostgrSQL specfic `ArrayField`. child_model_field = model_field.base_field child_field_class, child_field_kwargs = self.build_standard_field( 'child', child_model_field ) field_kwargs['child'] = child_field_class(**child_field_kwargs) return field_class, field_kwargs
[ "def", "build_standard_field", "(", "self", ",", "field_name", ",", "model_field", ")", ":", "field_mapping", "=", "ClassLookupDict", "(", "self", ".", "serializer_field_mapping", ")", "field_class", "=", "field_mapping", "[", "model_field", "]", "field_kwargs", "=", "get_field_kwargs", "(", "field_name", ",", "model_field", ")", "if", "'choices'", "in", "field_kwargs", ":", "# Fields with choices get coerced into `ChoiceField`", "# instead of using their regular typed field.", "field_class", "=", "self", ".", "serializer_choice_field", "# Some model fields may introduce kwargs that would not be valid", "# for the choice field. We need to strip these out.", "# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)", "valid_kwargs", "=", "set", "(", "(", "'read_only'", ",", "'write_only'", ",", "'required'", ",", "'default'", ",", "'initial'", ",", "'source'", ",", "'label'", ",", "'help_text'", ",", "'style'", ",", "'error_messages'", ",", "'validators'", ",", "'allow_null'", ",", "'allow_blank'", ",", "'choices'", ")", ")", "for", "key", "in", "list", "(", "field_kwargs", ".", "keys", "(", ")", ")", ":", "if", "key", "not", "in", "valid_kwargs", ":", "field_kwargs", ".", "pop", "(", "key", ")", "if", "not", "issubclass", "(", "field_class", ",", "ModelField", ")", ":", "# `model_field` is only valid for the fallback case of", "# `ModelField`, which is used when no other typed field", "# matched to the model field.", "field_kwargs", ".", "pop", "(", "'model_field'", ",", "None", ")", "if", "not", "issubclass", "(", "field_class", ",", "CharField", ")", "and", "not", "issubclass", "(", "field_class", ",", "ChoiceField", ")", ":", "# `allow_blank` is only valid for textual fields.", "field_kwargs", ".", "pop", "(", "'allow_blank'", ",", "None", ")", "if", "postgres_fields", "and", "isinstance", "(", "model_field", ",", "postgres_fields", ".", "ArrayField", ")", ":", "# Populate the `child` argument on `ListField` instances generated", "# for the PostgrSQL specfic `ArrayField`.", "child_model_field", "=", "model_field", ".", "base_field", "child_field_class", ",", "child_field_kwargs", "=", "self", ".", "build_standard_field", "(", "'child'", ",", "child_model_field", ")", "field_kwargs", "[", "'child'", "]", "=", "child_field_class", "(", "*", "*", "child_field_kwargs", ")", "return", "field_class", ",", "field_kwargs" ]
Create regular model fields.
[ "Create", "regular", "model", "fields", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1078-L1124
243,768
ronaldguillen/wave
wave/serializers.py
ModelSerializer.build_relational_field
def build_relational_field(self, field_name, relation_info): """ Create fields for forward and reverse relationships. """ field_class = self.serializer_related_field field_kwargs = get_relation_kwargs(field_name, relation_info) to_field = field_kwargs.pop('to_field', None) if to_field and not relation_info.related_model._meta.get_field(to_field).primary_key: field_kwargs['slug_field'] = to_field field_class = self.serializer_related_to_field # `view_name` is only valid for hyperlinked relationships. if not issubclass(field_class, HyperlinkedRelatedField): field_kwargs.pop('view_name', None) return field_class, field_kwargs
python
def build_relational_field(self, field_name, relation_info): """ Create fields for forward and reverse relationships. """ field_class = self.serializer_related_field field_kwargs = get_relation_kwargs(field_name, relation_info) to_field = field_kwargs.pop('to_field', None) if to_field and not relation_info.related_model._meta.get_field(to_field).primary_key: field_kwargs['slug_field'] = to_field field_class = self.serializer_related_to_field # `view_name` is only valid for hyperlinked relationships. if not issubclass(field_class, HyperlinkedRelatedField): field_kwargs.pop('view_name', None) return field_class, field_kwargs
[ "def", "build_relational_field", "(", "self", ",", "field_name", ",", "relation_info", ")", ":", "field_class", "=", "self", ".", "serializer_related_field", "field_kwargs", "=", "get_relation_kwargs", "(", "field_name", ",", "relation_info", ")", "to_field", "=", "field_kwargs", ".", "pop", "(", "'to_field'", ",", "None", ")", "if", "to_field", "and", "not", "relation_info", ".", "related_model", ".", "_meta", ".", "get_field", "(", "to_field", ")", ".", "primary_key", ":", "field_kwargs", "[", "'slug_field'", "]", "=", "to_field", "field_class", "=", "self", ".", "serializer_related_to_field", "# `view_name` is only valid for hyperlinked relationships.", "if", "not", "issubclass", "(", "field_class", ",", "HyperlinkedRelatedField", ")", ":", "field_kwargs", ".", "pop", "(", "'view_name'", ",", "None", ")", "return", "field_class", ",", "field_kwargs" ]
Create fields for forward and reverse relationships.
[ "Create", "fields", "for", "forward", "and", "reverse", "relationships", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1126-L1142
243,769
ronaldguillen/wave
wave/serializers.py
ModelSerializer.include_extra_kwargs
def include_extra_kwargs(self, kwargs, extra_kwargs): """ Include any 'extra_kwargs' that have been included for this field, possibly removing any incompatible existing keyword arguments. """ if extra_kwargs.get('read_only', False): for attr in [ 'required', 'default', 'allow_blank', 'allow_null', 'min_length', 'max_length', 'min_value', 'max_value', 'validators', 'queryset' ]: kwargs.pop(attr, None) if extra_kwargs.get('default') and kwargs.get('required') is False: kwargs.pop('required') if extra_kwargs.get('read_only', kwargs.get('read_only', False)): extra_kwargs.pop('required', None) # Read only fields should always omit the 'required' argument. kwargs.update(extra_kwargs) return kwargs
python
def include_extra_kwargs(self, kwargs, extra_kwargs): """ Include any 'extra_kwargs' that have been included for this field, possibly removing any incompatible existing keyword arguments. """ if extra_kwargs.get('read_only', False): for attr in [ 'required', 'default', 'allow_blank', 'allow_null', 'min_length', 'max_length', 'min_value', 'max_value', 'validators', 'queryset' ]: kwargs.pop(attr, None) if extra_kwargs.get('default') and kwargs.get('required') is False: kwargs.pop('required') if extra_kwargs.get('read_only', kwargs.get('read_only', False)): extra_kwargs.pop('required', None) # Read only fields should always omit the 'required' argument. kwargs.update(extra_kwargs) return kwargs
[ "def", "include_extra_kwargs", "(", "self", ",", "kwargs", ",", "extra_kwargs", ")", ":", "if", "extra_kwargs", ".", "get", "(", "'read_only'", ",", "False", ")", ":", "for", "attr", "in", "[", "'required'", ",", "'default'", ",", "'allow_blank'", ",", "'allow_null'", ",", "'min_length'", ",", "'max_length'", ",", "'min_value'", ",", "'max_value'", ",", "'validators'", ",", "'queryset'", "]", ":", "kwargs", ".", "pop", "(", "attr", ",", "None", ")", "if", "extra_kwargs", ".", "get", "(", "'default'", ")", "and", "kwargs", ".", "get", "(", "'required'", ")", "is", "False", ":", "kwargs", ".", "pop", "(", "'required'", ")", "if", "extra_kwargs", ".", "get", "(", "'read_only'", ",", "kwargs", ".", "get", "(", "'read_only'", ",", "False", ")", ")", ":", "extra_kwargs", ".", "pop", "(", "'required'", ",", "None", ")", "# Read only fields should always omit the 'required' argument.", "kwargs", ".", "update", "(", "extra_kwargs", ")", "return", "kwargs" ]
Include any 'extra_kwargs' that have been included for this field, possibly removing any incompatible existing keyword arguments.
[ "Include", "any", "extra_kwargs", "that", "have", "been", "included", "for", "this", "field", "possibly", "removing", "any", "incompatible", "existing", "keyword", "arguments", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1185-L1206
243,770
ronaldguillen/wave
wave/serializers.py
ModelSerializer.get_extra_kwargs
def get_extra_kwargs(self): """ Return a dictionary mapping field names to a dictionary of additional keyword arguments. """ extra_kwargs = getattr(self.Meta, 'extra_kwargs', {}) read_only_fields = getattr(self.Meta, 'read_only_fields', None) if read_only_fields is not None: for field_name in read_only_fields: kwargs = extra_kwargs.get(field_name, {}) kwargs['read_only'] = True extra_kwargs[field_name] = kwargs return extra_kwargs
python
def get_extra_kwargs(self): """ Return a dictionary mapping field names to a dictionary of additional keyword arguments. """ extra_kwargs = getattr(self.Meta, 'extra_kwargs', {}) read_only_fields = getattr(self.Meta, 'read_only_fields', None) if read_only_fields is not None: for field_name in read_only_fields: kwargs = extra_kwargs.get(field_name, {}) kwargs['read_only'] = True extra_kwargs[field_name] = kwargs return extra_kwargs
[ "def", "get_extra_kwargs", "(", "self", ")", ":", "extra_kwargs", "=", "getattr", "(", "self", ".", "Meta", ",", "'extra_kwargs'", ",", "{", "}", ")", "read_only_fields", "=", "getattr", "(", "self", ".", "Meta", ",", "'read_only_fields'", ",", "None", ")", "if", "read_only_fields", "is", "not", "None", ":", "for", "field_name", "in", "read_only_fields", ":", "kwargs", "=", "extra_kwargs", ".", "get", "(", "field_name", ",", "{", "}", ")", "kwargs", "[", "'read_only'", "]", "=", "True", "extra_kwargs", "[", "field_name", "]", "=", "kwargs", "return", "extra_kwargs" ]
Return a dictionary mapping field names to a dictionary of additional keyword arguments.
[ "Return", "a", "dictionary", "mapping", "field", "names", "to", "a", "dictionary", "of", "additional", "keyword", "arguments", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1210-L1224
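A minimal, dependency-free re-creation of the merge that `get_extra_kwargs` performs, assuming plain dicts in place of the serializer's `Meta` attributes:

def merge_read_only(extra_kwargs, read_only_fields):
    # Every name in read_only_fields gains read_only=True, preserving
    # any other per-field options already present in extra_kwargs.
    for field_name in read_only_fields or ():
        kwargs = extra_kwargs.get(field_name, {})
        kwargs['read_only'] = True
        extra_kwargs[field_name] = kwargs
    return extra_kwargs

print(merge_read_only({'slug': {'help_text': 'URL part'}}, ['slug', 'created']))
# {'slug': {'help_text': 'URL part', 'read_only': True}, 'created': {'read_only': True}}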
243,771
ronaldguillen/wave
wave/serializers.py
ModelSerializer._get_model_fields
def _get_model_fields(self, field_names, declared_fields, extra_kwargs): """ Returns all the model fields that are being mapped to by fields on the serializer class. Returned as a dict of 'model field name' -> 'model field'. Used internally by `get_uniqueness_field_options`. """ model = getattr(self.Meta, 'model') model_fields = {} for field_name in field_names: if field_name in declared_fields: # If the field is declared on the serializer field = declared_fields[field_name] source = field.source or field_name else: try: source = extra_kwargs[field_name]['source'] except KeyError: source = field_name if '.' in source or source == '*': # Model fields will always have a simple source mapping, # they can't be nested attribute lookups. continue try: field = model._meta.get_field(source) if isinstance(field, DjangoModelField): model_fields[source] = field except FieldDoesNotExist: pass return model_fields
python
def _get_model_fields(self, field_names, declared_fields, extra_kwargs): """ Returns all the model fields that are being mapped to by fields on the serializer class. Returned as a dict of 'model field name' -> 'model field'. Used internally by `get_uniqueness_field_options`. """ model = getattr(self.Meta, 'model') model_fields = {} for field_name in field_names: if field_name in declared_fields: # If the field is declared on the serializer field = declared_fields[field_name] source = field.source or field_name else: try: source = extra_kwargs[field_name]['source'] except KeyError: source = field_name if '.' in source or source == '*': # Model fields will always have a simple source mapping, # they can't be nested attribute lookups. continue try: field = model._meta.get_field(source) if isinstance(field, DjangoModelField): model_fields[source] = field except FieldDoesNotExist: pass return model_fields
[ "def", "_get_model_fields", "(", "self", ",", "field_names", ",", "declared_fields", ",", "extra_kwargs", ")", ":", "model", "=", "getattr", "(", "self", ".", "Meta", ",", "'model'", ")", "model_fields", "=", "{", "}", "for", "field_name", "in", "field_names", ":", "if", "field_name", "in", "declared_fields", ":", "# If the field is declared on the serializer", "field", "=", "declared_fields", "[", "field_name", "]", "source", "=", "field", ".", "source", "or", "field_name", "else", ":", "try", ":", "source", "=", "extra_kwargs", "[", "field_name", "]", "[", "'source'", "]", "except", "KeyError", ":", "source", "=", "field_name", "if", "'.'", "in", "source", "or", "source", "==", "'*'", ":", "# Model fields will always have a simple source mapping,", "# they can't be nested attribute lookups.", "continue", "try", ":", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "source", ")", "if", "isinstance", "(", "field", ",", "DjangoModelField", ")", ":", "model_fields", "[", "source", "]", "=", "field", "except", "FieldDoesNotExist", ":", "pass", "return", "model_fields" ]
Returns all the model fields that are being mapped to by fields on the serializer class. Returned as a dict of 'model field name' -> 'model field'. Used internally by `get_uniqueness_field_options`.
[ "Returns", "all", "the", "model", "fields", "that", "are", "being", "mapped", "to", "by", "fields", "on", "the", "serializer", "class", ".", "Returned", "as", "a", "dict", "of", "model", "field", "name", "-", ">", "model", "field", ".", "Used", "internally", "by", "get_uniqueness_field_options", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1298-L1331
243,772
ronaldguillen/wave
wave/serializers.py
ModelSerializer.get_validators
def get_validators(self): """ Determine the set of validators to use when instantiating serializer. """ # If the validators have been declared explicitly then use that. validators = getattr(getattr(self, 'Meta', None), 'validators', None) if validators is not None: return validators[:] # Otherwise use the default set of validators. return ( self.get_unique_together_validators() + self.get_unique_for_date_validators() )
python
def get_validators(self): """ Determine the set of validators to use when instantiating serializer. """ # If the validators have been declared explicitly then use that. validators = getattr(getattr(self, 'Meta', None), 'validators', None) if validators is not None: return validators[:] # Otherwise use the default set of validators. return ( self.get_unique_together_validators() + self.get_unique_for_date_validators() )
[ "def", "get_validators", "(", "self", ")", ":", "# If the validators have been declared explicitly then use that.", "validators", "=", "getattr", "(", "getattr", "(", "self", ",", "'Meta'", ",", "None", ")", ",", "'validators'", ",", "None", ")", "if", "validators", "is", "not", "None", ":", "return", "validators", "[", ":", "]", "# Otherwise use the default set of validators.", "return", "(", "self", ".", "get_unique_together_validators", "(", ")", "+", "self", ".", "get_unique_for_date_validators", "(", ")", ")" ]
Determine the set of validators to use when instantiating serializer.
[ "Determine", "the", "set", "of", "validators", "to", "use", "when", "instantiating", "serializer", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1335-L1348
243,773
ronaldguillen/wave
wave/serializers.py
ModelSerializer.get_unique_together_validators
def get_unique_together_validators(self): """ Determine a default set of validators for any unique_together constraints. """ model_class_inheritance_tree = ( [self.Meta.model] + list(self.Meta.model._meta.parents.keys()) ) # The field names we're passing through here only include fields # which may map onto a model field. Any dotted field name lookups # cannot map to a field, and must be a traversal, so we're not # including those. field_names = { field.source for field in self.fields.values() if (field.source != '*') and ('.' not in field.source) } # Note that we make sure to check `unique_together` both on the # base model class, but also on any parent classes. validators = [] for parent_class in model_class_inheritance_tree: for unique_together in parent_class._meta.unique_together: if field_names.issuperset(set(unique_together)): validator = UniqueTogetherValidator( queryset=parent_class._default_manager, fields=unique_together ) validators.append(validator) return validators
python
def get_unique_together_validators(self): """ Determine a default set of validators for any unique_together constraints. """ model_class_inheritance_tree = ( [self.Meta.model] + list(self.Meta.model._meta.parents.keys()) ) # The field names we're passing through here only include fields # which may map onto a model field. Any dotted field name lookups # cannot map to a field, and must be a traversal, so we're not # including those. field_names = { field.source for field in self.fields.values() if (field.source != '*') and ('.' not in field.source) } # Note that we make sure to check `unique_together` both on the # base model class, but also on any parent classes. validators = [] for parent_class in model_class_inheritance_tree: for unique_together in parent_class._meta.unique_together: if field_names.issuperset(set(unique_together)): validator = UniqueTogetherValidator( queryset=parent_class._default_manager, fields=unique_together ) validators.append(validator) return validators
[ "def", "get_unique_together_validators", "(", "self", ")", ":", "model_class_inheritance_tree", "=", "(", "[", "self", ".", "Meta", ".", "model", "]", "+", "list", "(", "self", ".", "Meta", ".", "model", ".", "_meta", ".", "parents", ".", "keys", "(", ")", ")", ")", "# The field names we're passing through here only include fields", "# which may map onto a model field. Any dotted field name lookups", "# cannot map to a field, and must be a traversal, so we're not", "# including those.", "field_names", "=", "{", "field", ".", "source", "for", "field", "in", "self", ".", "fields", ".", "values", "(", ")", "if", "(", "field", ".", "source", "!=", "'*'", ")", "and", "(", "'.'", "not", "in", "field", ".", "source", ")", "}", "# Note that we make sure to check `unique_together` both on the", "# base model class, but also on any parent classes.", "validators", "=", "[", "]", "for", "parent_class", "in", "model_class_inheritance_tree", ":", "for", "unique_together", "in", "parent_class", ".", "_meta", ".", "unique_together", ":", "if", "field_names", ".", "issuperset", "(", "set", "(", "unique_together", ")", ")", ":", "validator", "=", "UniqueTogetherValidator", "(", "queryset", "=", "parent_class", ".", "_default_manager", ",", "fields", "=", "unique_together", ")", "validators", ".", "append", "(", "validator", ")", "return", "validators" ]
Determine a default set of validators for any unique_together constraints.
[ "Determine", "a", "default", "set", "of", "validators", "for", "any", "unique_together", "constraints", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1350-L1379
243,774
ronaldguillen/wave
wave/serializers.py
HyperlinkedModelSerializer.build_nested_field
def build_nested_field(self, field_name, relation_info, nested_depth): """ Create nested fields for forward and reverse relationships. """ class NestedSerializer(HyperlinkedModelSerializer): class Meta: model = relation_info.related_model depth = nested_depth - 1 field_class = NestedSerializer field_kwargs = get_nested_relation_kwargs(relation_info) return field_class, field_kwargs
python
def build_nested_field(self, field_name, relation_info, nested_depth): """ Create nested fields for forward and reverse relationships. """ class NestedSerializer(HyperlinkedModelSerializer): class Meta: model = relation_info.related_model depth = nested_depth - 1 field_class = NestedSerializer field_kwargs = get_nested_relation_kwargs(relation_info) return field_class, field_kwargs
[ "def", "build_nested_field", "(", "self", ",", "field_name", ",", "relation_info", ",", "nested_depth", ")", ":", "class", "NestedSerializer", "(", "HyperlinkedModelSerializer", ")", ":", "class", "Meta", ":", "model", "=", "relation_info", ".", "related_model", "depth", "=", "nested_depth", "-", "1", "field_class", "=", "NestedSerializer", "field_kwargs", "=", "get_nested_relation_kwargs", "(", "relation_info", ")", "return", "field_class", ",", "field_kwargs" ]
Create nested fields for forward and reverse relationships.
[ "Create", "nested", "fields", "for", "forward", "and", "reverse", "relationships", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1460-L1472
243,775
bfontaine/p7magma
magma/session.py
Session.get_url
def get_url(self, url): """ Get an absolute URL from a given one. """ if url.startswith('/'): url = '%s%s' % (self.base_url, url) return url
python
def get_url(self, url): """ Get an absolute URL from a given one. """ if url.startswith('/'): url = '%s%s' % (self.base_url, url) return url
[ "def", "get_url", "(", "self", ",", "url", ")", ":", "if", "url", ".", "startswith", "(", "'/'", ")", ":", "url", "=", "'%s%s'", "%", "(", "self", ".", "base_url", ",", "url", ")", "return", "url" ]
Get an absolute URL from a given one.
[ "Get", "an", "absolute", "URL", "from", "a", "given", "one", "." ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L57-L63
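The URL resolution above is simple enough to demonstrate in isolation; this sketch uses a free function with an explicit `base_url` parameter standing in for the attribute on `Session`:

def absolutize(base_url, url):
    # Only root-relative paths are prefixed; anything else passes through.
    if url.startswith('/'):
        url = '%s%s' % (base_url, url)
    return url

print(absolutize('https://example.com', '/login.php'))      # https://example.com/login.php
print(absolutize('https://example.com', 'http://other/x'))  # http://other/x (unchanged)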
243,776
bfontaine/p7magma
magma/session.py
Session.get_soup
def get_soup(self, *args, **kwargs): """ Shortcut for ``get`` which returns a ``BeautifulSoup`` element """ return BeautifulSoup(self.get(*args, **kwargs).text)
python
def get_soup(self, *args, **kwargs): """ Shortcut for ``get`` which returns a ``BeautifulSoup`` element """ return BeautifulSoup(self.get(*args, **kwargs).text)
[ "def", "get_soup", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "BeautifulSoup", "(", "self", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", ".", "text", ")" ]
Shortcut for ``get`` which returns a ``BeautifulSoup`` element
[ "Shortcut", "for", "get", "which", "returns", "a", "BeautifulSoup", "element" ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L73-L77
243,777
bfontaine/p7magma
magma/session.py
Session.post_soup
def post_soup(self, *args, **kwargs): """ Shortcut for ``post`` which returns a ``BeautifulSoup`` element """ return BeautifulSoup(self.post(*args, **kwargs).text)
python
def post_soup(self, *args, **kwargs): """ Shortcut for ``post`` which returns a ``BeautifulSoup`` element """ return BeautifulSoup(self.post(*args, **kwargs).text)
[ "def", "post_soup", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "BeautifulSoup", "(", "self", ".", "post", "(", "*", "args", ",", "*", "*", "kwargs", ")", ".", "text", ")" ]
Shortcut for ``post`` which returns a ``BeautifulSoup`` element
[ "Shortcut", "for", "post", "which", "returns", "a", "BeautifulSoup", "element" ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L79-L83
243,778
bfontaine/p7magma
magma/session.py
Session.get_results_soup
def get_results_soup(self, year=None): """ ``get_soup`` on the results page. The page URL depends on the year. """ if year is None: year = self.year year = YEARS.get(year, year) return self.get_soup(URLS['results'][year])
python
def get_results_soup(self, year=None): """ ``get_soup`` on the results page. The page URL depends on the year. """ if year is None: year = self.year year = YEARS.get(year, year) return self.get_soup(URLS['results'][year])
[ "def", "get_results_soup", "(", "self", ",", "year", "=", "None", ")", ":", "if", "year", "is", "None", ":", "year", "=", "self", ".", "year", "year", "=", "YEARS", ".", "get", "(", "year", ",", "year", ")", "return", "self", ".", "get_soup", "(", "URLS", "[", "'results'", "]", "[", "year", "]", ")" ]
``get_soup`` on the results page. The page URL depends on the year.
[ "get_soup", "on", "the", "results", "page", ".", "The", "page", "URL", "depends", "on", "the", "year", "." ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L85-L93
243,779
bfontaine/p7magma
magma/session.py
Session.login
def login(self, year, firstname, lastname, passwd, with_year=True): """ Authenticate a user """ firstname = firstname.upper() lastname = lastname.upper() if with_year and not self.set_year(year): return False url = URLS['login'] params = { 'prenom': firstname, 'nom': lastname, 'pwd': passwd, } soup = self.post_soup(url, data=params) return not soup.select('font[color=red]')
python
def login(self, year, firstname, lastname, passwd, with_year=True): """ Authenticate a user """ firstname = firstname.upper() lastname = lastname.upper() if with_year and not self.set_year(year): return False url = URLS['login'] params = { 'prenom': firstname, 'nom': lastname, 'pwd': passwd, } soup = self.post_soup(url, data=params) return not soup.select('font[color=red]')
[ "def", "login", "(", "self", ",", "year", ",", "firstname", ",", "lastname", ",", "passwd", ",", "with_year", "=", "True", ")", ":", "firstname", "=", "firstname", ".", "upper", "(", ")", "lastname", "=", "lastname", ".", "upper", "(", ")", "if", "with_year", "and", "not", "self", ".", "set_year", "(", "year", ")", ":", "return", "False", "url", "=", "URLS", "[", "'login'", "]", "params", "=", "{", "'prenom'", ":", "firstname", ",", "'nom'", ":", "lastname", ",", "'pwd'", ":", "passwd", ",", "}", "soup", "=", "self", ".", "post_soup", "(", "url", ",", "data", "=", "params", ")", "return", "not", "soup", ".", "select", "(", "'font[color=red]'", ")" ]
Authenticate a user
[ "Authenticate", "a", "user" ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L95-L114
243,780
bfontaine/p7magma
magma/session.py
Session.set_year
def set_year(self, year): """ Set a user's year. This is required on magma just before the login. It's called by default by ``login``. """ self.year = YEARS.get(year, year) data = {'idCursus': self.year} soup = self.post_soup('/~etudiant/login.php', data=data) return bool(soup.select('ul.rMenu-hor'))
python
def set_year(self, year): """ Set a user's year. This is required on magma just before the login. It's called by default by ``login``. """ self.year = YEARS.get(year, year) data = {'idCursus': self.year} soup = self.post_soup('/~etudiant/login.php', data=data) return bool(soup.select('ul.rMenu-hor'))
[ "def", "set_year", "(", "self", ",", "year", ")", ":", "self", ".", "year", "=", "YEARS", ".", "get", "(", "year", ",", "year", ")", "data", "=", "{", "'idCursus'", ":", "self", ".", "year", "}", "soup", "=", "self", ".", "post_soup", "(", "'/~etudiant/login.php'", ",", "data", "=", "data", ")", "return", "bool", "(", "soup", ".", "select", "(", "'ul.rMenu-hor'", ")", ")" ]
Set a user's year. This is required on magma just before the login. It's called by default by ``login``.
[ "Set", "a", "user", "s", "year", ".", "This", "is", "required", "on", "magma", "just", "before", "the", "login", ".", "It", "s", "called", "by", "default", "by", "login", "." ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L122-L130
243,781
xaptum/xtt-python
xtt/exceptions.py
_build_return_code_enum
def _build_return_code_enum(): """ Creates an IntEnum containing all the XTT return codes. Finds all return codes by scanning the FFI for items whose names match the pattern "XTT_RETURN_<X>". The name of the result enum value is the suffix "<X>". """ prefix = 'XTT_RETURN_' codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)} return IntEnum('ReturnCode', codes)
python
def _build_return_code_enum(): """ Creates an IntEnum containing all the XTT return codes. Finds all return codes by scanning the FFI for items whose names match the pattern "XTT_RETURN_<X>". The name of the result enum value is the suffix "<X>". """ prefix = 'XTT_RETURN_' codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)} return IntEnum('ReturnCode', codes)
[ "def", "_build_return_code_enum", "(", ")", ":", "prefix", "=", "'XTT_RETURN_'", "codes", "=", "{", "k", "[", "len", "(", "prefix", ")", ":", "]", ":", "v", "for", "(", "k", ",", "v", ")", "in", "vars", "(", "_lib", ")", ".", "items", "(", ")", "if", "k", ".", "startswith", "(", "prefix", ")", "}", "return", "IntEnum", "(", "'ReturnCode'", ",", "codes", ")" ]
Creates an IntEnum containing all the XTT return codes. Finds all return codes by scanning the FFI for items whose names match the pattern "XTT_RETURN_<X>". The name of the result enum value is the suffix "<X>".
[ "Creates", "an", "IntEnum", "containing", "all", "the", "XTT", "return", "codes", "." ]
23ee469488d710d730314bec1136c4dd7ac2cd5c
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/exceptions.py#L33-L43
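Since the real `_lib` FFI module is not importable here, the following self-contained sketch reproduces the same prefix scan against a fake namespace; only `enum.IntEnum` from the standard library is assumed:

from enum import IntEnum

class _FakeLib:
    # Stand-in for the cffi library object scanned by the real code.
    XTT_RETURN_SUCCESS = 0
    XTT_RETURN_BAD_INIT = 1
    UNRELATED = 99  # lacks the prefix, so it is ignored

prefix = 'XTT_RETURN_'
codes = {k[len(prefix):]: v for k, v in vars(_FakeLib).items()
         if k.startswith(prefix)}
ReturnCode = IntEnum('ReturnCode', codes)

print(ReturnCode.SUCCESS, int(ReturnCode.BAD_INIT))  # ReturnCode.SUCCESS 1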
243,782
20c/twentyc.database
twentyc/database/base.py
Client
def Client(engine="couchbase", host="", auth="", database="", logger=None, verbose=True): """ Return new database client Arguments engine <str> defines which engine to use, currently supports "couchdb" and "couchbase" host <str|couchdb.Server> host url, when using couchdb this can also be a server instance auth <str> bucket_auth for couchbase, auth for couchdb database <str> database name for couchdb logger <Logger> python logger instance """ if engine == "couchbase": from twentyc.database.couchbase.client import CouchbaseClient return CouchbaseClient( host, bucket_auth=auth, logger=logger ) elif engine == "couchdb": from twentyc.database.couchdb.client import CouchDBClient return CouchDBClient( host, database, auth=auth, logger=logger, verbose=verbose ) elif engine == "dummydb": from twentyc.database.dummydb.client import DummyDBClient return DummyDBClient() else: raise InvalidEngineException(engine)
python
def Client(engine="couchbase", host="", auth="", database="", logger=None, verbose=True): """ Return new database client Arguments engine <str> defines which engine to use, currently supports "couchdb" and "couchbase" host <str|couchdb.Server> host url, when using couchdb this can also be a server instance auth <str> bucket_auth for couchbase, auth for couchdb database <str> database name for couchdb logger <Logger> python logger instance """ if engine == "couchbase": from twentyc.database.couchbase.client import CouchbaseClient return CouchbaseClient( host, bucket_auth=auth, logger=logger ) elif engine == "couchdb": from twentyc.database.couchdb.client import CouchDBClient return CouchDBClient( host, database, auth=auth, logger=logger, verbose=verbose ) elif engine == "dummydb": from twentyc.database.dummydb.client import DummyDBClient return DummyDBClient() else: raise InvalidEngineException(engine)
[ "def", "Client", "(", "engine", "=", "\"couchbase\"", ",", "host", "=", "\"\"", ",", "auth", "=", "\"\"", ",", "database", "=", "\"\"", ",", "logger", "=", "None", ",", "verbose", "=", "True", ")", ":", "if", "engine", "==", "\"couchbase\"", ":", "from", "twentyc", ".", "database", ".", "couchbase", ".", "client", "import", "CouchbaseClient", "return", "CouchbaseClient", "(", "host", ",", "bucket_auth", "=", "auth", ",", "logger", "=", "logger", ")", "elif", "engine", "==", "\"couchdb\"", ":", "from", "twentyc", ".", "database", ".", "couchdb", ".", "client", "import", "CouchDBClient", "return", "CouchDBClient", "(", "host", ",", "database", ",", "auth", "=", "auth", ",", "logger", "=", "logger", ",", "verbose", "=", "verbose", ")", "elif", "engine", "==", "\"dummydb\"", ":", "from", "twentyc", ".", "database", ".", "dummydb", ".", "client", "import", "DummyDBClient", "return", "DummyDBClient", "(", ")", "else", ":", "raise", "InvalidEngineException", "(", "engine", ")" ]
Return new database client Arguments engine <str> defines which engine to use, currently supports "couchdb" and "couchbase" host <str|couchdb.Server> host url, when using couchdb this can also be a server instance auth <str> bucket_auth for couchbase, auth for couchdb database <str> database name for couchdb logger <Logger> python logger instance
[ "Return", "new", "database", "client" ]
c6b7184d66dddafb306c94c4f98234bef1df1291
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/base.py#L8-L36
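The `Client` factory's if/elif chain could equally be expressed as a dispatch table; this hypothetical variant (stub client class, not the real twentyc.database imports) illustrates the design choice of deferring construction until an engine is requested:

class InvalidEngineException(Exception):
    pass

class DummyDBClient:
    pass

_ENGINES = {
    # Values are zero-setup constructors; real engines would import
    # their client modules lazily inside these callables.
    'dummydb': lambda **kw: DummyDBClient(),
}

def make_client(engine='dummydb', **kw):
    try:
        return _ENGINES[engine](**kw)
    except KeyError:
        raise InvalidEngineException(engine)

print(type(make_client('dummydb')).__name__)  # DummyDBClient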
243,783
sassoo/goldman
goldman/serializers/jsonapi.py
Serializer._serialize_data
def _serialize_data(self, data): """ Turn the data into a JSON API compliant resource object WARN: This function has both side effects & a return. It's poorly designed because it mutates data & yet returns a new doc. FIX. :spec: jsonapi.org/format/#document-resource-objects :param data: dict for serializing :return: dict resource in JSON API format """ rels = {} rlink = rid_url(data['rtype'], data['rid']) doc = { 'id': data.pop('rid'), 'type': data.pop('rtype'), 'links': { 'self': rlink, }, } for key, val in data['to_many'].items(): rels.update(self._serialize_to_many(key, val, rlink)) del data['to_many'] for key, val in data['to_one'].items(): rels.update(self._serialize_to_one(key, val, rlink)) del data['to_one'] if data: doc['attributes'] = data if rels: doc['relationships'] = rels return doc
python
def _serialize_data(self, data): """ Turn the data into a JSON API compliant resource object WARN: This function has both side effects & a return. It's poorly designed because it mutates data & yet returns a new doc. FIX. :spec: jsonapi.org/format/#document-resource-objects :param data: dict for serializing :return: dict resource in JSON API format """ rels = {} rlink = rid_url(data['rtype'], data['rid']) doc = { 'id': data.pop('rid'), 'type': data.pop('rtype'), 'links': { 'self': rlink, }, } for key, val in data['to_many'].items(): rels.update(self._serialize_to_many(key, val, rlink)) del data['to_many'] for key, val in data['to_one'].items(): rels.update(self._serialize_to_one(key, val, rlink)) del data['to_one'] if data: doc['attributes'] = data if rels: doc['relationships'] = rels return doc
[ "def", "_serialize_data", "(", "self", ",", "data", ")", ":", "rels", "=", "{", "}", "rlink", "=", "rid_url", "(", "data", "[", "'rtype'", "]", ",", "data", "[", "'rid'", "]", ")", "doc", "=", "{", "'id'", ":", "data", ".", "pop", "(", "'rid'", ")", ",", "'type'", ":", "data", ".", "pop", "(", "'rtype'", ")", ",", "'links'", ":", "{", "'self'", ":", "rlink", ",", "}", ",", "}", "for", "key", ",", "val", "in", "data", "[", "'to_many'", "]", ".", "items", "(", ")", ":", "rels", ".", "update", "(", "self", ".", "_serialize_to_many", "(", "key", ",", "val", ",", "rlink", ")", ")", "del", "data", "[", "'to_many'", "]", "for", "key", ",", "val", "in", "data", "[", "'to_one'", "]", ".", "items", "(", ")", ":", "rels", ".", "update", "(", "self", ".", "_serialize_to_one", "(", "key", ",", "val", ",", "rlink", ")", ")", "del", "data", "[", "'to_one'", "]", "if", "data", ":", "doc", "[", "'attributes'", "]", "=", "data", "if", "rels", ":", "doc", "[", "'relationships'", "]", "=", "rels", "return", "doc" ]
Turn the data into a JSON API compliant resource object WARN: This function has both side effects & a return. It's poorly designed because it mutates data & yet returns a new doc. FIX. :spec: jsonapi.org/format/#document-resource-objects :param data: dict for serializing :return: dict resource in JSON API format
[ "Turn", "the", "data", "into", "a", "JSON", "API", "compliant", "resource", "object" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi.py#L78-L117
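To visualise the resource object that `_serialize_data` emits, here is a plain-Python walk-through of the same shape with a trivial stand-in for `rid_url` and no relationship fields:

def rid_url(rtype, rid):
    # Stand-in for goldman's helper; builds the resource's self link.
    return '/%s/%s' % (rtype, rid)

data = {'rtype': 'articles', 'rid': '1', 'title': 'JSON API example'}
rlink = rid_url(data['rtype'], data['rid'])
doc = {
    'id': data.pop('rid'),
    'type': data.pop('rtype'),
    'links': {'self': rlink},
}
if data:  # whatever is left over becomes the attributes member
    doc['attributes'] = data
print(doc)
# {'id': '1', 'type': 'articles', 'links': {'self': '/articles/1'},
#  'attributes': {'title': 'JSON API example'}}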
243,784
sassoo/goldman
goldman/serializers/jsonapi.py
Serializer._serialize_pages
def _serialize_pages(self): """ Return a JSON API compliant pagination links section If the paginator has a value for a given link then this method will also add the same links to the response object's `link` header according to the guidance of RFC 5988. Falcon has a native add_link helper for forming the `link` header according to RFC 5988. :return: dict of links used for pagination """ pages = self.req.pages.to_dict() links = {} for key, val in pages.items(): if val: params = self.req.params params.update(val) links[key] = '%s?%s' % (self.req.path, urlencode(params)) self.resp.add_link(links[key], key) else: links[key] = val return links
python
def _serialize_pages(self): """ Return a JSON API compliant pagination links section If the paginator has a value for a given link then this method will also add the same links to the response object's `link` header according to the guidance of RFC 5988. Falcon has a native add_link helper for forming the `link` header according to RFC 5988. :return: dict of links used for pagination """ pages = self.req.pages.to_dict() links = {} for key, val in pages.items(): if val: params = self.req.params params.update(val) links[key] = '%s?%s' % (self.req.path, urlencode(params)) self.resp.add_link(links[key], key) else: links[key] = val return links
[ "def", "_serialize_pages", "(", "self", ")", ":", "pages", "=", "self", ".", "req", ".", "pages", ".", "to_dict", "(", ")", "links", "=", "{", "}", "for", "key", ",", "val", "in", "pages", ".", "items", "(", ")", ":", "if", "val", ":", "params", "=", "self", ".", "req", ".", "params", "params", ".", "update", "(", "val", ")", "links", "[", "key", "]", "=", "'%s?%s'", "%", "(", "self", ".", "req", ".", "path", ",", "urlencode", "(", "params", ")", ")", "self", ".", "resp", ".", "add_link", "(", "links", "[", "key", "]", ",", "key", ")", "else", ":", "links", "[", "key", "]", "=", "val", "return", "links" ]
Return a JSON API compliant pagination links section If the paginator has a value for a given link then this method will also add the same links to the response object's `link` header according to the guidance of RFC 5988. Falcon has a native add_link helper for forming the `link` header according to RFC 5988. :return: dict of links used for pagination
[ "Return", "a", "JSON", "API", "compliant", "pagination", "links", "section" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi.py#L119-L145
243,785
sassoo/goldman
goldman/serializers/jsonapi.py
Serializer._serialize_to_many
def _serialize_to_many(self, key, vals, rlink): """ Make a to_many JSON API compliant :spec: jsonapi.org/format/#document-resource-object-relationships :param key: the string name of the relationship field :param vals: array of dicts containing `rid` & `rtype` keys for the to_many, empty array if no values, & None if the to_many's values are unknown :return: dict as documented in the spec link """ rel = { key: { 'data': [], 'links': { 'related': rlink + '/' + key } } } try: for val in vals: rel[key]['data'].append({ 'id': val['rid'], 'type': val['rtype'], }) except TypeError: del rel[key]['data'] return rel
python
def _serialize_to_many(self, key, vals, rlink): """ Make a to_many JSON API compliant :spec: jsonapi.org/format/#document-resource-object-relationships :param key: the string name of the relationship field :param vals: array of dicts containing `rid` & `rtype` keys for the to_many, empty array if no values, & None if the to_many's values are unknown :return: dict as documented in the spec link """ rel = { key: { 'data': [], 'links': { 'related': rlink + '/' + key } } } try: for val in vals: rel[key]['data'].append({ 'id': val['rid'], 'type': val['rtype'], }) except TypeError: del rel[key]['data'] return rel
[ "def", "_serialize_to_many", "(", "self", ",", "key", ",", "vals", ",", "rlink", ")", ":", "rel", "=", "{", "key", ":", "{", "'data'", ":", "[", "]", ",", "'links'", ":", "{", "'related'", ":", "rlink", "+", "'/'", "+", "key", "}", "}", "}", "try", ":", "for", "val", "in", "vals", ":", "rel", "[", "key", "]", "[", "'data'", "]", ".", "append", "(", "{", "'id'", ":", "val", "[", "'rid'", "]", ",", "'type'", ":", "val", "[", "'rtype'", "]", ",", "}", ")", "except", "TypeError", ":", "del", "rel", "[", "key", "]", "[", "'data'", "]", "return", "rel" ]
Make a to_many JSON API compliant :spec: jsonapi.org/format/#document-resource-object-relationships :param key: the string name of the relationship field :param vals: array of dicts containing `rid` & `rtype` keys for the to_many, empty array if no values, & None if the to_many's values are unknown :return: dict as documented in the spec link
[ "Make", "a", "to_many", "JSON", "API", "compliant" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi.py#L147-L180
243,786
sassoo/goldman
goldman/serializers/jsonapi.py
Serializer._serialize_to_one
def _serialize_to_one(self, key, val, rlink): """ Make a to_one JSON API compliant :spec: jsonapi.org/format/#document-resource-object-relationships :param key: the string name of the relationship field :param val: dict containing `rid` & `rtype` keys for the to_one & None if the to_one is null :return: dict as documented in the spec link """ data = None if val and val['rid']: data = {'id': val['rid'], 'type': val['rtype']} return { key: { 'data': data, 'links': { 'related': rlink + '/' + key } } }
python
def _serialize_to_one(self, key, val, rlink): """ Make a to_one JSON API compliant :spec: jsonapi.org/format/#document-resource-object-relationships :param key: the string name of the relationship field :param val: dict containing `rid` & `rtype` keys for the to_one & None if the to_one is null :return: dict as documented in the spec link """ data = None if val and val['rid']: data = {'id': val['rid'], 'type': val['rtype']} return { key: { 'data': data, 'links': { 'related': rlink + '/' + key } } }
[ "def", "_serialize_to_one", "(", "self", ",", "key", ",", "val", ",", "rlink", ")", ":", "data", "=", "None", "if", "val", "and", "val", "[", "'rid'", "]", ":", "data", "=", "{", "'id'", ":", "val", "[", "'rid'", "]", ",", "'type'", ":", "val", "[", "'rtype'", "]", "}", "return", "{", "key", ":", "{", "'data'", ":", "data", ",", "'links'", ":", "{", "'related'", ":", "rlink", "+", "'/'", "+", "key", "}", "}", "}" ]
Make a to_one JSON API compliant :spec: jsonapi.org/format/#document-resource-object-relationships :param key: the string name of the relationship field :param val: dict containing `rid` & `rtype` keys for the to_one & None if the to_one is null :return: dict as documented in the spec link
[ "Make", "a", "to_one", "JSON", "API", "compliant" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi.py#L182-L207
243,787
sassoo/goldman
goldman/validators/__init__.py
validate_uuid
def validate_uuid(value): """ UUID 128-bit validator """ if value and not isinstance(value, UUID): try: return UUID(str(value), version=4) except (AttributeError, ValueError): raise ValidationError('not a valid UUID') return value
python
def validate_uuid(value): """ UUID 128-bit validator """ if value and not isinstance(value, UUID): try: return UUID(str(value), version=4) except (AttributeError, ValueError): raise ValidationError('not a valid UUID') return value
[ "def", "validate_uuid", "(", "value", ")", ":", "if", "value", "and", "not", "isinstance", "(", "value", ",", "UUID", ")", ":", "try", ":", "return", "UUID", "(", "str", "(", "value", ")", ",", "version", "=", "4", ")", "except", "(", "AttributeError", ",", "ValueError", ")", ":", "raise", "ValidationError", "(", "'not a valid UUID'", ")", "return", "value" ]
UUID 128-bit validator
[ "UUID", "128", "-", "bit", "validator" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/validators/__init__.py#L24-L32
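Because the validator above depends only on the standard library plus goldman's `ValidationError`, it can be exercised standalone by supplying a local exception class, as in this sketch:

from uuid import UUID

class ValidationError(Exception):
    # Local stand-in for goldman's ValidationError.
    pass

def validate_uuid(value):
    if value and not isinstance(value, UUID):
        try:
            return UUID(str(value), version=4)
        except (AttributeError, ValueError):
            raise ValidationError('not a valid UUID')
    return value

print(validate_uuid('12345678-1234-4123-8123-123456789012'))
print(validate_uuid(None))  # falsy values pass through untouched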
243,788
pybel/pybel-artifactory
src/pybel_artifactory/utils.py
get_namespace_url
def get_namespace_url(module, version): """Get a BEL namespace file from Artifactory given the name and version. :param str module: :param str version: :rtype: str """ module = module.strip('/') return '{module}/{name}'.format( module=get_namespace_module_url(module), name=get_namespace_file_name(module, version), )
python
def get_namespace_url(module, version): """Get a BEL namespace file from Artifactory given the name and version. :param str module: :param str version: :rtype: str """ module = module.strip('/') return '{module}/{name}'.format( module=get_namespace_module_url(module), name=get_namespace_file_name(module, version), )
[ "def", "get_namespace_url", "(", "module", ",", "version", ")", ":", "module", "=", "module", ".", "strip", "(", "'/'", ")", "return", "'{module}/{name}'", ".", "format", "(", "module", "=", "get_namespace_module_url", "(", "module", ")", ",", "name", "=", "get_namespace_file_name", "(", "module", ",", "version", ")", ",", ")" ]
Get a BEL namespace file from Artifactory given the name and version. :param str module: :param str version: :rtype: str
[ "Get", "a", "BEL", "namespace", "file", "from", "Artifactory", "given", "the", "name", "and", "version", "." ]
720107780a59be2ef08885290dfa519b1da62871
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/utils.py#L20-L32
243,789
pybel/pybel-artifactory
src/pybel_artifactory/utils.py
get_annotation_url
def get_annotation_url(module, version): """Get a BEL annotation file from Artifactory given the name and version. :param str module: :param str version: :rtype: str """ module = module.strip('/') return '{module}/{name}'.format( module=get_annotation_module_url(module), name=get_annotation_file_name(module, version), )
python
def get_annotation_url(module, version): """Get a BEL annotation file from Artifactory given the name and version. :param str module: :param str version: :rtype: str """ module = module.strip('/') return '{module}/{name}'.format( module=get_annotation_module_url(module), name=get_annotation_file_name(module, version), )
[ "def", "get_annotation_url", "(", "module", ",", "version", ")", ":", "module", "=", "module", ".", "strip", "(", "'/'", ")", "return", "'{module}/{name}'", ".", "format", "(", "module", "=", "get_annotation_module_url", "(", "module", ")", ",", "name", "=", "get_annotation_file_name", "(", "module", ",", "version", ")", ",", ")" ]
Get a BEL annotation file from Artifactory given the name and version. :param str module: :param str version: :rtype: str
[ "Get", "a", "BEL", "annotation", "file", "from", "Artifactory", "given", "the", "name", "and", "version", "." ]
720107780a59be2ef08885290dfa519b1da62871
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/utils.py#L35-L47
243,790
pybel/pybel-artifactory
src/pybel_artifactory/utils.py
get_knowledge_url
def get_knowledge_url(module, version): """Get a BEL knowledge file from Artifactory given the name and version. :param str module: :param str version: :rtype: str """ module = module.strip('/') return '{module}/{name}'.format( module=get_knowledge_module_url(module), name=get_knowledge_file_name(module, version), )
python
def get_knowledge_url(module, version): """Get a BEL knowledge file from Artifactory given the name and version. :param str module: :param str version: :rtype: str """ module = module.strip('/') return '{module}/{name}'.format( module=get_knowledge_module_url(module), name=get_knowledge_file_name(module, version), )
[ "def", "get_knowledge_url", "(", "module", ",", "version", ")", ":", "module", "=", "module", ".", "strip", "(", "'/'", ")", "return", "'{module}/{name}'", ".", "format", "(", "module", "=", "get_knowledge_module_url", "(", "module", ")", ",", "name", "=", "get_knowledge_file_name", "(", "module", ",", "version", ")", ",", ")" ]
Get a BEL knowledge file from Artifactory given the name and version. :param str module: :param str version: :rtype: str
[ "Get", "a", "BEL", "knowledge", "file", "from", "Artifactory", "given", "the", "name", "and", "version", "." ]
720107780a59be2ef08885290dfa519b1da62871
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/utils.py#L50-L62
243,791
xaptum/xtt-python
_build_xtt.py
local_path
def local_path(path): """ Return the absolute path relative to the root of this project """ current = os.path.dirname(__file__) root = current return os.path.abspath(os.path.join(root, path))
python
def local_path(path): """ Return the absolute path relative to the root of this project """ current = os.path.dirname(__file__) root = current return os.path.abspath(os.path.join(root, path))
[ "def", "local_path", "(", "path", ")", ":", "current", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "root", "=", "current", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "path", ")", ")" ]
Return the absolute path relative to the root of this project
[ "Return", "the", "absolute", "path", "relative", "to", "the", "root", "of", "this", "project" ]
23ee469488d710d730314bec1136c4dd7ac2cd5c
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/_build_xtt.py#L42-L48
243,792
quasipedia/swaggery
swaggery/application.py
Swaggery._register_resources
def _register_resources(self, api_dirs, do_checks): '''Register all Apis, Resources and Models with the application.''' msg = 'Looking-up for APIs in the following directories: {}' log.debug(msg.format(api_dirs)) if do_checks: check_and_load(api_dirs) else: msg = 'Loading module "{}" from directory "{}"' for loader, mname, _ in pkgutil.walk_packages(api_dirs): sys.path.append(os.path.abspath(loader.path)) log.debug(msg.format(mname, loader.path)) import_module(mname)
python
def _register_resources(self, api_dirs, do_checks): '''Register all Apis, Resources and Models with the application.''' msg = 'Looking-up for APIs in the following directories: {}' log.debug(msg.format(api_dirs)) if do_checks: check_and_load(api_dirs) else: msg = 'Loading module "{}" from directory "{}"' for loader, mname, _ in pkgutil.walk_packages(api_dirs): sys.path.append(os.path.abspath(loader.path)) log.debug(msg.format(mname, loader.path)) import_module(mname)
[ "def", "_register_resources", "(", "self", ",", "api_dirs", ",", "do_checks", ")", ":", "msg", "=", "'Looking-up for APIs in the following directories: {}'", "log", ".", "debug", "(", "msg", ".", "format", "(", "api_dirs", ")", ")", "if", "do_checks", ":", "check_and_load", "(", "api_dirs", ")", "else", ":", "msg", "=", "'Loading module \"{}\" from directory \"{}\"'", "for", "loader", ",", "mname", ",", "_", "in", "pkgutil", ".", "walk_packages", "(", "api_dirs", ")", ":", "sys", ".", "path", ".", "append", "(", "os", ".", "path", ".", "abspath", "(", "loader", ".", "path", ")", ")", "log", ".", "debug", "(", "msg", ".", "format", "(", "mname", ",", "loader", ".", "path", ")", ")", "import_module", "(", "mname", ")" ]
Register all Apis, Resources and Models with the application.
[ "Register", "all", "Apis", "Resources", "and", "Models", "with", "the", "application", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/application.py#L35-L46
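A self-contained demo of the discovery loop in `_register_resources` (tempfile-based, so it is runnable anywhere): a throwaway module is written to disk, found by `pkgutil.walk_packages`, put on `sys.path`, and imported, mirroring the non-checking branch above:

import os
import pkgutil
import sys
import tempfile
from importlib import import_module

api_dir = tempfile.mkdtemp()
with open(os.path.join(api_dir, 'pets_api.py'), 'w') as fh:
    fh.write('GREETING = "registered"\n')

for loader, mname, _ in pkgutil.walk_packages([api_dir]):
    sys.path.append(os.path.abspath(loader.path))
    module = import_module(mname)
    print(mname, module.GREETING)  # pets_api registered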
243,793
quasipedia/swaggery
swaggery/application.py
Swaggery._mount_resources
def _mount_resources(self): '''Mount all registered resources onto the application.''' rules = [] self.callback_map = {} for ep in Resource: for rule, callback in ep.get_routing_tuples(): log.debug('Path "{}" mapped to "{}"'.format( rule.rule, rule.endpoint)) rules.append(rule) self.callback_map[rule.endpoint] = callback self.url_map = Map(rules)
python
def _mount_resources(self): '''Mount all registered resources onto the application.''' rules = [] self.callback_map = {} for ep in Resource: for rule, callback in ep.get_routing_tuples(): log.debug('Path "{}" mapped to "{}"'.format( rule.rule, rule.endpoint)) rules.append(rule) self.callback_map[rule.endpoint] = callback self.url_map = Map(rules)
[ "def", "_mount_resources", "(", "self", ")", ":", "rules", "=", "[", "]", "self", ".", "callback_map", "=", "{", "}", "for", "ep", "in", "Resource", ":", "for", "rule", ",", "callback", "in", "ep", ".", "get_routing_tuples", "(", ")", ":", "log", ".", "debug", "(", "'Path \"{}\" mapped to \"{}\"'", ".", "format", "(", "rule", ".", "rule", ",", "rule", ".", "endpoint", ")", ")", "rules", ".", "append", "(", "rule", ")", "self", ".", "callback_map", "[", "rule", ".", "endpoint", "]", "=", "callback", "self", ".", "url_map", "=", "Map", "(", "rules", ")" ]
Mount all registered resources onto the application.
[ "Mount", "all", "registered", "resources", "onto", "the", "application", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/application.py#L48-L58
243,794
quasipedia/swaggery
swaggery/application.py
Swaggery._get_coroutine
def _get_coroutine(self, request, start_response): '''Try to dispatch the request and get the matching coroutine.''' adapter = self.url_map.bind_to_environ(request.environ) resource, kwargs = adapter.match() callback = self.callback_map[resource] inject_extra_args(callback, request, kwargs) return callback(request, start_response, **kwargs)
python
def _get_coroutine(self, request, start_response): '''Try to dispatch the request and get the matching coroutine.''' adapter = self.url_map.bind_to_environ(request.environ) resource, kwargs = adapter.match() callback = self.callback_map[resource] inject_extra_args(callback, request, kwargs) return callback(request, start_response, **kwargs)
[ "def", "_get_coroutine", "(", "self", ",", "request", ",", "start_response", ")", ":", "adapter", "=", "self", ".", "url_map", ".", "bind_to_environ", "(", "request", ".", "environ", ")", "resource", ",", "kwargs", "=", "adapter", ".", "match", "(", ")", "callback", "=", "self", ".", "callback_map", "[", "resource", "]", "inject_extra_args", "(", "callback", ",", "request", ",", "kwargs", ")", "return", "callback", "(", "request", ",", "start_response", ",", "*", "*", "kwargs", ")" ]
Try to dispatch the request and get the matching coroutine.
[ "Try", "to", "dispatch", "the", "request", "and", "get", "the", "matching", "coroutine", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/application.py#L60-L66
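Assuming werkzeug is installed, the routing that `_get_coroutine` relies on looks like this; `bind_to_environ` behaves like `bind` but reads the host and path from a WSGI environ:

from werkzeug.routing import Map, Rule

url_map = Map([Rule('/pets/<int:pet_id>', endpoint='pets.get')])
adapter = url_map.bind('api.example.com')  # the app uses bind_to_environ(environ)
print(adapter.match('/pets/42'))  # ('pets.get', {'pet_id': 42})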
243,795
rameshg87/pyremotevbox
pyremotevbox/ZSI/client.py
_Binding.SetAuth
def SetAuth(self, style, user=None, password=None): '''Change auth style, return object to user. ''' self.auth_style, self.auth_user, self.auth_pass = \ style, user, password return self
python
def SetAuth(self, style, user=None, password=None): '''Change auth style, return object to user. ''' self.auth_style, self.auth_user, self.auth_pass = \ style, user, password return self
[ "def", "SetAuth", "(", "self", ",", "style", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "self", ".", "auth_style", ",", "self", ".", "auth_user", ",", "self", ".", "auth_pass", "=", "style", ",", "user", ",", "password", "return", "self" ]
Change auth style, return object to user.
[ "Change", "auth", "style", "return", "object", "to", "user", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L139-L144
243,796
rameshg87/pyremotevbox
pyremotevbox/ZSI/client.py
_Binding.AddHeader
def AddHeader(self, header, value): '''Add a header to send. ''' self.user_headers.append((header, value)) return self
python
def AddHeader(self, header, value): '''Add a header to send. ''' self.user_headers.append((header, value)) return self
[ "def", "AddHeader", "(", "self", ",", "header", ",", "value", ")", ":", "self", ".", "user_headers", ".", "append", "(", "(", "header", ",", "value", ")", ")", "return", "self" ]
Add a header to send.
[ "Add", "a", "header", "to", "send", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L163-L167
243,797
rameshg87/pyremotevbox
pyremotevbox/ZSI/client.py
_Binding.__addcookies
def __addcookies(self): '''Add cookies from self.cookies to request in self.h ''' for cname, morsel in self.cookies.items(): attrs = [] value = morsel.get('version', '') if value != '' and value != '0': attrs.append('$Version=%s' % value) attrs.append('%s=%s' % (cname, morsel.coded_value)) value = morsel.get('path') if value: attrs.append('$Path=%s' % value) value = morsel.get('domain') if value: attrs.append('$Domain=%s' % value) self.h.putheader('Cookie', "; ".join(attrs))
python
def __addcookies(self): '''Add cookies from self.cookies to request in self.h ''' for cname, morsel in self.cookies.items(): attrs = [] value = morsel.get('version', '') if value != '' and value != '0': attrs.append('$Version=%s' % value) attrs.append('%s=%s' % (cname, morsel.coded_value)) value = morsel.get('path') if value: attrs.append('$Path=%s' % value) value = morsel.get('domain') if value: attrs.append('$Domain=%s' % value) self.h.putheader('Cookie', "; ".join(attrs))
[ "def", "__addcookies", "(", "self", ")", ":", "for", "cname", ",", "morsel", "in", "self", ".", "cookies", ".", "items", "(", ")", ":", "attrs", "=", "[", "]", "value", "=", "morsel", ".", "get", "(", "'version'", ",", "''", ")", "if", "value", "!=", "''", "and", "value", "!=", "'0'", ":", "attrs", ".", "append", "(", "'$Version=%s'", "%", "value", ")", "attrs", ".", "append", "(", "'%s=%s'", "%", "(", "cname", ",", "morsel", ".", "coded_value", ")", ")", "value", "=", "morsel", ".", "get", "(", "'path'", ")", "if", "value", ":", "attrs", ".", "append", "(", "'$Path=%s'", "%", "value", ")", "value", "=", "morsel", ".", "get", "(", "'domain'", ")", "if", "value", ":", "attrs", ".", "append", "(", "'$Domain=%s'", "%", "value", ")", "self", ".", "h", ".", "putheader", "(", "'Cookie'", ",", "\"; \"", ".", "join", "(", "attrs", ")", ")" ]
Add cookies from self.cookies to request in self.h
[ "Add", "cookies", "from", "self", ".", "cookies", "to", "request", "in", "self", ".", "h" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L169-L184
243,798
rameshg87/pyremotevbox
pyremotevbox/ZSI/client.py
_Binding.ReceiveRaw
def ReceiveRaw(self, **kw): '''Read a server reply, unconverted to any format and return it. ''' if self.data: return self.data trace = self.trace while 1: response = self.h.getresponse() self.reply_code, self.reply_msg, self.reply_headers, self.data = \ response.status, response.reason, response.msg, response.read() if trace: print >>trace, "_" * 33, time.ctime(time.time()), "RESPONSE:" for i in (self.reply_code, self.reply_msg,): print >>trace, str(i) print >>trace, "-------" print >>trace, str(self.reply_headers) print >>trace, self.data saved = None for d in response.msg.getallmatchingheaders('set-cookie'): if d[0] in [ ' ', '\t' ]: saved += d.strip() else: if saved: self.cookies.load(saved) saved = d.strip() if saved: self.cookies.load(saved) if response.status == 401: if not callable(self.http_callbacks.get(response.status,None)): raise RuntimeError, 'HTTP Digest Authorization Failed' self.http_callbacks[response.status](response) continue if response.status != 100: break # The httplib doesn't understand the HTTP continuation header. # Horrible internals hack to patch things up. self.h._HTTPConnection__state = httplib._CS_REQ_SENT self.h._HTTPConnection__response = None return self.data
python
def ReceiveRaw(self, **kw): '''Read a server reply, unconverted to any format and return it. ''' if self.data: return self.data trace = self.trace while 1: response = self.h.getresponse() self.reply_code, self.reply_msg, self.reply_headers, self.data = \ response.status, response.reason, response.msg, response.read() if trace: print >>trace, "_" * 33, time.ctime(time.time()), "RESPONSE:" for i in (self.reply_code, self.reply_msg,): print >>trace, str(i) print >>trace, "-------" print >>trace, str(self.reply_headers) print >>trace, self.data saved = None for d in response.msg.getallmatchingheaders('set-cookie'): if d[0] in [ ' ', '\t' ]: saved += d.strip() else: if saved: self.cookies.load(saved) saved = d.strip() if saved: self.cookies.load(saved) if response.status == 401: if not callable(self.http_callbacks.get(response.status,None)): raise RuntimeError, 'HTTP Digest Authorization Failed' self.http_callbacks[response.status](response) continue if response.status != 100: break # The httplib doesn't understand the HTTP continuation header. # Horrible internals hack to patch things up. self.h._HTTPConnection__state = httplib._CS_REQ_SENT self.h._HTTPConnection__response = None return self.data
[ "def", "ReceiveRaw", "(", "self", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "data", ":", "return", "self", ".", "data", "trace", "=", "self", ".", "trace", "while", "1", ":", "response", "=", "self", ".", "h", ".", "getresponse", "(", ")", "self", ".", "reply_code", ",", "self", ".", "reply_msg", ",", "self", ".", "reply_headers", ",", "self", ".", "data", "=", "response", ".", "status", ",", "response", ".", "reason", ",", "response", ".", "msg", ",", "response", ".", "read", "(", ")", "if", "trace", ":", "print", ">>", "trace", ",", "\"_\"", "*", "33", ",", "time", ".", "ctime", "(", "time", ".", "time", "(", ")", ")", ",", "\"RESPONSE:\"", "for", "i", "in", "(", "self", ".", "reply_code", ",", "self", ".", "reply_msg", ",", ")", ":", "print", ">>", "trace", ",", "str", "(", "i", ")", "print", ">>", "trace", ",", "\"-------\"", "print", ">>", "trace", ",", "str", "(", "self", ".", "reply_headers", ")", "print", ">>", "trace", ",", "self", ".", "data", "saved", "=", "None", "for", "d", "in", "response", ".", "msg", ".", "getallmatchingheaders", "(", "'set-cookie'", ")", ":", "if", "d", "[", "0", "]", "in", "[", "' '", ",", "'\\t'", "]", ":", "saved", "+=", "d", ".", "strip", "(", ")", "else", ":", "if", "saved", ":", "self", ".", "cookies", ".", "load", "(", "saved", ")", "saved", "=", "d", ".", "strip", "(", ")", "if", "saved", ":", "self", ".", "cookies", ".", "load", "(", "saved", ")", "if", "response", ".", "status", "==", "401", ":", "if", "not", "callable", "(", "self", ".", "http_callbacks", ".", "get", "(", "response", ".", "status", ",", "None", ")", ")", ":", "raise", "RuntimeError", ",", "'HTTP Digest Authorization Failed'", "self", ".", "http_callbacks", "[", "response", ".", "status", "]", "(", "response", ")", "continue", "if", "response", ".", "status", "!=", "100", ":", "break", "# The httplib doesn't understand the HTTP continuation header.", "# Horrible internals hack to patch things up.", "self", ".", "h", ".", "_HTTPConnection__state", "=", "httplib", ".", "_CS_REQ_SENT", "self", ".", "h", ".", "_HTTPConnection__response", "=", "None", "return", "self", ".", "data" ]
Read a server reply, unconverted to any format and return it.
[ "Read", "a", "server", "reply", "unconverted", "to", "any", "format", "and", "return", "it", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L367-L402
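Note on the record above: `ReceiveRaw` buffers the entire reply, folds continuation-wrapped `Set-Cookie` headers into the cookie jar, dispatches 401s to a registered digest-auth callback, and loops past interim 100 responses by resetting `httplib`'s private connection state. A simplified sketch of the same read loop against Python 3's `http.client`, which consumes interim 1xx responses itself so the private-attribute hack is not needed; the connection handling here is an assumption, not part of the module:

import http.client
from http.cookies import SimpleCookie

def receive_raw(conn, jar):
    # conn: an http.client.HTTPConnection with a request already sent;
    # jar: a SimpleCookie standing in for self.cookies above.
    response = conn.getresponse()
    data = response.read()
    # response.msg is an email.message.Message; get_all() returns every
    # Set-Cookie header already unfolded, so the manual ' '/'\t'
    # continuation stitching above is unnecessary here.
    for header in response.msg.get_all('Set-Cookie') or []:
        jar.load(header)
    if response.status == 401:
        # The original retries via self.http_callbacks[401]; this sketch
        # simply surfaces the failure.
        raise RuntimeError('HTTP Digest Authorization Failed')
    return response.status, data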
243,799
rameshg87/pyremotevbox
pyremotevbox/ZSI/client.py
_Binding.ReceiveSOAP
def ReceiveSOAP(self, readerclass=None, **kw):
    '''Get back a SOAP message.
    '''
    if self.ps: return self.ps
    if not self.IsSOAP():
        raise TypeError(
            'Response is "%s", not "text/xml"' % self.reply_headers.type)
    if len(self.data) == 0:
        raise TypeError('Received empty response')

    self.ps = ParsedSoap(self.data,
                         readerclass=readerclass or self.readerclass,
                         encodingStyle=kw.get('encodingStyle'))

    if self.sig_handler is not None:
        self.sig_handler.verify(self.ps)

    return self.ps
python
def ReceiveSOAP(self, readerclass=None, **kw):
    '''Get back a SOAP message.
    '''
    if self.ps: return self.ps
    if not self.IsSOAP():
        raise TypeError(
            'Response is "%s", not "text/xml"' % self.reply_headers.type)
    if len(self.data) == 0:
        raise TypeError('Received empty response')

    self.ps = ParsedSoap(self.data,
                         readerclass=readerclass or self.readerclass,
                         encodingStyle=kw.get('encodingStyle'))

    if self.sig_handler is not None:
        self.sig_handler.verify(self.ps)

    return self.ps
[ "def", "ReceiveSOAP", "(", "self", ",", "readerclass", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "self", ".", "ps", ":", "return", "self", ".", "ps", "if", "not", "self", ".", "IsSOAP", "(", ")", ":", "raise", "TypeError", "(", "'Response is \"%s\", not \"text/xml\"'", "%", "self", ".", "reply_headers", ".", "type", ")", "if", "len", "(", "self", ".", "data", ")", "==", "0", ":", "raise", "TypeError", "(", "'Received empty response'", ")", "self", ".", "ps", "=", "ParsedSoap", "(", "self", ".", "data", ",", "readerclass", "=", "readerclass", "or", "self", ".", "readerclass", ",", "encodingStyle", "=", "kw", ".", "get", "(", "'encodingStyle'", ")", ")", "if", "self", ".", "sig_handler", "is", "not", "None", ":", "self", ".", "sig_handler", ".", "verify", "(", "self", ".", "ps", ")", "return", "self", ".", "ps" ]
Get back a SOAP message.
[ "Get", "back", "a", "SOAP", "message", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L410-L427
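Note on the record above: `ReceiveSOAP` guards the buffered reply (correct content type, non-empty body) before handing it to ZSI's `ParsedSoap` and, optionally, a signature handler. As a stand-in for that ZSI-specific parser, this sketch applies the same guards and parses with the stdlib `xml.etree.ElementTree` instead; the envelope literal is illustrative only:

import xml.etree.ElementTree as ET

def parse_soap(data, content_type):
    # Same preconditions as ReceiveSOAP, minus signature verification.
    if 'text/xml' not in content_type:
        raise TypeError('Response is "%s", not "text/xml"' % content_type)
    if len(data) == 0:
        raise TypeError('Received empty response')
    return ET.fromstring(data)

envelope = parse_soap(
    b'<Envelope><Body><Pong/></Body></Envelope>',
    'text/xml; charset=utf-8')
print(envelope.find('.//Pong') is not None)  # True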