id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
244,100
|
PSU-OIT-ARC/django-local-settings
|
local_settings/strategy.py
|
guess_strategy_type
|
def guess_strategy_type(file_name_or_ext):
"""Guess strategy type to use for file by extension.
Args:
file_name_or_ext: Either a file name with an extension or just
an extension
Returns:
Strategy: Type corresponding to extension or None if there's no
corresponding strategy type
"""
if '.' not in file_name_or_ext:
ext = file_name_or_ext
else:
name, ext = os.path.splitext(file_name_or_ext)
ext = ext.lstrip('.')
file_type_map = get_file_type_map()
return file_type_map.get(ext, None)
|
python
|
def guess_strategy_type(file_name_or_ext):
"""Guess strategy type to use for file by extension.
Args:
file_name_or_ext: Either a file name with an extension or just
an extension
Returns:
Strategy: Type corresponding to extension or None if there's no
corresponding strategy type
"""
if '.' not in file_name_or_ext:
ext = file_name_or_ext
else:
name, ext = os.path.splitext(file_name_or_ext)
ext = ext.lstrip('.')
file_type_map = get_file_type_map()
return file_type_map.get(ext, None)
|
[
"def",
"guess_strategy_type",
"(",
"file_name_or_ext",
")",
":",
"if",
"'.'",
"not",
"in",
"file_name_or_ext",
":",
"ext",
"=",
"file_name_or_ext",
"else",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name_or_ext",
")",
"ext",
"=",
"ext",
".",
"lstrip",
"(",
"'.'",
")",
"file_type_map",
"=",
"get_file_type_map",
"(",
")",
"return",
"file_type_map",
".",
"get",
"(",
"ext",
",",
"None",
")"
] |
Guess strategy type to use for file by extension.
Args:
file_name_or_ext: Either a file name with an extension or just
an extension
Returns:
Strategy: Type corresponding to extension or None if there's no
corresponding strategy type
|
[
"Guess",
"strategy",
"type",
"to",
"use",
"for",
"file",
"by",
"extension",
"."
] |
758810fbd9411c2046a187afcac6532155cac694
|
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/strategy.py#L232-L250
|
244,101
|
PSU-OIT-ARC/django-local-settings
|
local_settings/strategy.py
|
INIStrategy.read_file
|
def read_file(self, file_name, section=None):
"""Read settings from specified ``section`` of config file."""
file_name, section = self.parse_file_name_and_section(file_name, section)
if not os.path.isfile(file_name):
raise SettingsFileNotFoundError(file_name)
parser = self.make_parser()
with open(file_name) as fp:
parser.read_file(fp)
settings = OrderedDict()
if parser.has_section(section):
section_dict = parser[section]
self.section_found_while_reading = True
else:
section_dict = parser.defaults().copy()
extends = section_dict.get('extends')
if extends:
extends = self.decode_value(extends)
extends, extends_section = self.parse_file_name_and_section(
extends, extender=file_name, extender_section=section)
settings.update(self.read_file(extends, extends_section))
settings.update(section_dict)
if not self.section_found_while_reading:
raise SettingsFileSectionNotFoundError(section)
return settings
|
python
|
def read_file(self, file_name, section=None):
"""Read settings from specified ``section`` of config file."""
file_name, section = self.parse_file_name_and_section(file_name, section)
if not os.path.isfile(file_name):
raise SettingsFileNotFoundError(file_name)
parser = self.make_parser()
with open(file_name) as fp:
parser.read_file(fp)
settings = OrderedDict()
if parser.has_section(section):
section_dict = parser[section]
self.section_found_while_reading = True
else:
section_dict = parser.defaults().copy()
extends = section_dict.get('extends')
if extends:
extends = self.decode_value(extends)
extends, extends_section = self.parse_file_name_and_section(
extends, extender=file_name, extender_section=section)
settings.update(self.read_file(extends, extends_section))
settings.update(section_dict)
if not self.section_found_while_reading:
raise SettingsFileSectionNotFoundError(section)
return settings
|
[
"def",
"read_file",
"(",
"self",
",",
"file_name",
",",
"section",
"=",
"None",
")",
":",
"file_name",
",",
"section",
"=",
"self",
".",
"parse_file_name_and_section",
"(",
"file_name",
",",
"section",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"raise",
"SettingsFileNotFoundError",
"(",
"file_name",
")",
"parser",
"=",
"self",
".",
"make_parser",
"(",
")",
"with",
"open",
"(",
"file_name",
")",
"as",
"fp",
":",
"parser",
".",
"read_file",
"(",
"fp",
")",
"settings",
"=",
"OrderedDict",
"(",
")",
"if",
"parser",
".",
"has_section",
"(",
"section",
")",
":",
"section_dict",
"=",
"parser",
"[",
"section",
"]",
"self",
".",
"section_found_while_reading",
"=",
"True",
"else",
":",
"section_dict",
"=",
"parser",
".",
"defaults",
"(",
")",
".",
"copy",
"(",
")",
"extends",
"=",
"section_dict",
".",
"get",
"(",
"'extends'",
")",
"if",
"extends",
":",
"extends",
"=",
"self",
".",
"decode_value",
"(",
"extends",
")",
"extends",
",",
"extends_section",
"=",
"self",
".",
"parse_file_name_and_section",
"(",
"extends",
",",
"extender",
"=",
"file_name",
",",
"extender_section",
"=",
"section",
")",
"settings",
".",
"update",
"(",
"self",
".",
"read_file",
"(",
"extends",
",",
"extends_section",
")",
")",
"settings",
".",
"update",
"(",
"section_dict",
")",
"if",
"not",
"self",
".",
"section_found_while_reading",
":",
"raise",
"SettingsFileSectionNotFoundError",
"(",
"section",
")",
"return",
"settings"
] |
Read settings from specified ``section`` of config file.
|
[
"Read",
"settings",
"from",
"specified",
"section",
"of",
"config",
"file",
"."
] |
758810fbd9411c2046a187afcac6532155cac694
|
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/strategy.py#L122-L152
|
244,102
|
PSU-OIT-ARC/django-local-settings
|
local_settings/strategy.py
|
INIStrategy.get_default_section
|
def get_default_section(self, file_name):
"""Returns first non-DEFAULT section; falls back to DEFAULT."""
if not os.path.isfile(file_name):
return 'DEFAULT'
parser = self.make_parser()
with open(file_name) as fp:
parser.read_file(fp)
sections = parser.sections()
section = sections[0] if len(sections) > 0 else 'DEFAULT'
return section
|
python
|
def get_default_section(self, file_name):
"""Returns first non-DEFAULT section; falls back to DEFAULT."""
if not os.path.isfile(file_name):
return 'DEFAULT'
parser = self.make_parser()
with open(file_name) as fp:
parser.read_file(fp)
sections = parser.sections()
section = sections[0] if len(sections) > 0 else 'DEFAULT'
return section
|
[
"def",
"get_default_section",
"(",
"self",
",",
"file_name",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"return",
"'DEFAULT'",
"parser",
"=",
"self",
".",
"make_parser",
"(",
")",
"with",
"open",
"(",
"file_name",
")",
"as",
"fp",
":",
"parser",
".",
"read_file",
"(",
"fp",
")",
"sections",
"=",
"parser",
".",
"sections",
"(",
")",
"section",
"=",
"sections",
"[",
"0",
"]",
"if",
"len",
"(",
"sections",
")",
">",
"0",
"else",
"'DEFAULT'",
"return",
"section"
] |
Returns first non-DEFAULT section; falls back to DEFAULT.
|
[
"Returns",
"first",
"non",
"-",
"DEFAULT",
"section",
";",
"falls",
"back",
"to",
"DEFAULT",
"."
] |
758810fbd9411c2046a187afcac6532155cac694
|
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/strategy.py#L176-L185
|
244,103
|
collectiveacuity/labPack
|
labpack/platforms/heroku.py
|
herokuClient._validate_install
|
def _validate_install(self):
''' a method to validate heroku is installed '''
self.printer('Checking heroku installation ... ', flush=True)
# import dependencies
from os import devnull
from subprocess import call, check_output
# validate cli installation
sys_command = 'heroku --version'
try:
call(sys_command, shell=True, stdout=open(devnull, 'wb'))
except Exception as err:
self.printer('ERROR')
raise Exception('"heroku cli" not installed. GoTo: https://devcenter.heroku.com/articles/heroku-cli')
# print response and return
self.printer('done.')
return True
|
python
|
def _validate_install(self):
''' a method to validate heroku is installed '''
self.printer('Checking heroku installation ... ', flush=True)
# import dependencies
from os import devnull
from subprocess import call, check_output
# validate cli installation
sys_command = 'heroku --version'
try:
call(sys_command, shell=True, stdout=open(devnull, 'wb'))
except Exception as err:
self.printer('ERROR')
raise Exception('"heroku cli" not installed. GoTo: https://devcenter.heroku.com/articles/heroku-cli')
# print response and return
self.printer('done.')
return True
|
[
"def",
"_validate_install",
"(",
"self",
")",
":",
"self",
".",
"printer",
"(",
"'Checking heroku installation ... '",
",",
"flush",
"=",
"True",
")",
"# import dependencies\r",
"from",
"os",
"import",
"devnull",
"from",
"subprocess",
"import",
"call",
",",
"check_output",
"# validate cli installation\r",
"sys_command",
"=",
"'heroku --version'",
"try",
":",
"call",
"(",
"sys_command",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"open",
"(",
"devnull",
",",
"'wb'",
")",
")",
"except",
"Exception",
"as",
"err",
":",
"self",
".",
"printer",
"(",
"'ERROR'",
")",
"raise",
"Exception",
"(",
"'\"heroku cli\" not installed. GoTo: https://devcenter.heroku.com/articles/heroku-cli'",
")",
"# print response and return\r",
"self",
".",
"printer",
"(",
"'done.'",
")",
"return",
"True"
] |
a method to validate heroku is installed
|
[
"a",
"method",
"to",
"validate",
"heroku",
"is",
"installed"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/heroku.py#L65-L86
|
244,104
|
collectiveacuity/labPack
|
labpack/platforms/heroku.py
|
herokuClient._update_netrc
|
def _update_netrc(self, netrc_path, auth_token, account_email):
''' a method to replace heroku login details in netrc file '''
# define patterns
import re
record_end = '(\n\n|\n\w|$)'
heroku_regex = re.compile('(machine\sapi\.heroku\.com.*?\nmachine\sgit\.heroku\.com.*?)%s' % record_end, re.S)
# retrieve netrc text
netrc_text = open(netrc_path).read().strip()
# replace text with new password and login
new_heroku = 'machine api.heroku.com\n password %s\n login %s\n' % (auth_token, account_email)
new_heroku += 'machine git.heroku.com\n password %s\n login %s\n\n' % (auth_token, account_email)
heroku_search = heroku_regex.findall(netrc_text)
if heroku_search:
if re.match('\n\w', heroku_search[0][1]):
new_heroku = new_heroku[:-1]
new_heroku += heroku_search[0][1]
netrc_text = heroku_regex.sub(new_heroku, netrc_text)
else:
netrc_text += '\n\n' + new_heroku
# save netrc
with open(netrc_path, 'wt') as f:
f.write(netrc_text)
f.close()
return netrc_text
|
python
|
def _update_netrc(self, netrc_path, auth_token, account_email):
''' a method to replace heroku login details in netrc file '''
# define patterns
import re
record_end = '(\n\n|\n\w|$)'
heroku_regex = re.compile('(machine\sapi\.heroku\.com.*?\nmachine\sgit\.heroku\.com.*?)%s' % record_end, re.S)
# retrieve netrc text
netrc_text = open(netrc_path).read().strip()
# replace text with new password and login
new_heroku = 'machine api.heroku.com\n password %s\n login %s\n' % (auth_token, account_email)
new_heroku += 'machine git.heroku.com\n password %s\n login %s\n\n' % (auth_token, account_email)
heroku_search = heroku_regex.findall(netrc_text)
if heroku_search:
if re.match('\n\w', heroku_search[0][1]):
new_heroku = new_heroku[:-1]
new_heroku += heroku_search[0][1]
netrc_text = heroku_regex.sub(new_heroku, netrc_text)
else:
netrc_text += '\n\n' + new_heroku
# save netrc
with open(netrc_path, 'wt') as f:
f.write(netrc_text)
f.close()
return netrc_text
|
[
"def",
"_update_netrc",
"(",
"self",
",",
"netrc_path",
",",
"auth_token",
",",
"account_email",
")",
":",
"# define patterns\r",
"import",
"re",
"record_end",
"=",
"'(\\n\\n|\\n\\w|$)'",
"heroku_regex",
"=",
"re",
".",
"compile",
"(",
"'(machine\\sapi\\.heroku\\.com.*?\\nmachine\\sgit\\.heroku\\.com.*?)%s'",
"%",
"record_end",
",",
"re",
".",
"S",
")",
"# retrieve netrc text\r",
"netrc_text",
"=",
"open",
"(",
"netrc_path",
")",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"# replace text with new password and login\r",
"new_heroku",
"=",
"'machine api.heroku.com\\n password %s\\n login %s\\n'",
"%",
"(",
"auth_token",
",",
"account_email",
")",
"new_heroku",
"+=",
"'machine git.heroku.com\\n password %s\\n login %s\\n\\n'",
"%",
"(",
"auth_token",
",",
"account_email",
")",
"heroku_search",
"=",
"heroku_regex",
".",
"findall",
"(",
"netrc_text",
")",
"if",
"heroku_search",
":",
"if",
"re",
".",
"match",
"(",
"'\\n\\w'",
",",
"heroku_search",
"[",
"0",
"]",
"[",
"1",
"]",
")",
":",
"new_heroku",
"=",
"new_heroku",
"[",
":",
"-",
"1",
"]",
"new_heroku",
"+=",
"heroku_search",
"[",
"0",
"]",
"[",
"1",
"]",
"netrc_text",
"=",
"heroku_regex",
".",
"sub",
"(",
"new_heroku",
",",
"netrc_text",
")",
"else",
":",
"netrc_text",
"+=",
"'\\n\\n'",
"+",
"new_heroku",
"# save netrc\r",
"with",
"open",
"(",
"netrc_path",
",",
"'wt'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"netrc_text",
")",
"f",
".",
"close",
"(",
")",
"return",
"netrc_text"
] |
a method to replace heroku login details in netrc file
|
[
"a",
"method",
"to",
"replace",
"heroku",
"login",
"details",
"in",
"netrc",
"file"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/heroku.py#L88-L117
|
244,105
|
collectiveacuity/labPack
|
labpack/platforms/heroku.py
|
herokuClient._validate_login
|
def _validate_login(self):
''' a method to validate user can access heroku account '''
title = '%s.validate_login' % self.__class__.__name__
# verbosity
windows_insert = ' On windows, run in cmd.exe'
self.printer('Checking heroku credentials ... ', flush=True)
# validate netrc exists
from os import path
netrc_path = path.join(self.localhost.home, '.netrc')
# TODO verify path exists on Windows
if not path.exists(netrc_path):
error_msg = '.netrc file is missing. Try: heroku login, then heroku auth:token'
if self.localhost.os.sysname in ('Windows'):
error_msg += windows_insert
self.printer('ERROR.')
raise Exception(error_msg)
# replace value in netrc
netrc_text = self._update_netrc(netrc_path, self.token, self.email)
# verify remote access
def handle_invalid(stdout, proc):
# define process closing helper
def _close_process(_proc):
# close process
import psutil
process = psutil.Process(_proc.pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
# restore values to netrc
with open(netrc_path, 'wt') as f:
f.write(netrc_text)
f.close()
# invalid credentials
if stdout.find('Invalid credentials') > -1:
_close_process(proc)
self.printer('ERROR.')
raise Exception('Permission denied. Heroku auth token is not valid.\nTry: "heroku login", then "heroku auth:token"')
sys_command = 'heroku apps --json'
response = self._handle_command(sys_command, interactive=handle_invalid, handle_error=True)
if response.find('Warning: heroku update') > -1:
self.printer('WARNING: heroku update available.')
self.printer('Try: npm install -g -U heroku\nor see https://devcenter.heroku.com/articles/heroku-cli#staying-up-to-date')
self.printer('Checking heroku credentials ... ')
response_lines = response.splitlines()
response = '\n'.join(response_lines[1:])
# add list to object
import json
try:
self.apps = json.loads(response)
except:
self.printer('ERROR.')
raise Exception(response)
self.printer('done.')
return self
|
python
|
def _validate_login(self):
''' a method to validate user can access heroku account '''
title = '%s.validate_login' % self.__class__.__name__
# verbosity
windows_insert = ' On windows, run in cmd.exe'
self.printer('Checking heroku credentials ... ', flush=True)
# validate netrc exists
from os import path
netrc_path = path.join(self.localhost.home, '.netrc')
# TODO verify path exists on Windows
if not path.exists(netrc_path):
error_msg = '.netrc file is missing. Try: heroku login, then heroku auth:token'
if self.localhost.os.sysname in ('Windows'):
error_msg += windows_insert
self.printer('ERROR.')
raise Exception(error_msg)
# replace value in netrc
netrc_text = self._update_netrc(netrc_path, self.token, self.email)
# verify remote access
def handle_invalid(stdout, proc):
# define process closing helper
def _close_process(_proc):
# close process
import psutil
process = psutil.Process(_proc.pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
# restore values to netrc
with open(netrc_path, 'wt') as f:
f.write(netrc_text)
f.close()
# invalid credentials
if stdout.find('Invalid credentials') > -1:
_close_process(proc)
self.printer('ERROR.')
raise Exception('Permission denied. Heroku auth token is not valid.\nTry: "heroku login", then "heroku auth:token"')
sys_command = 'heroku apps --json'
response = self._handle_command(sys_command, interactive=handle_invalid, handle_error=True)
if response.find('Warning: heroku update') > -1:
self.printer('WARNING: heroku update available.')
self.printer('Try: npm install -g -U heroku\nor see https://devcenter.heroku.com/articles/heroku-cli#staying-up-to-date')
self.printer('Checking heroku credentials ... ')
response_lines = response.splitlines()
response = '\n'.join(response_lines[1:])
# add list to object
import json
try:
self.apps = json.loads(response)
except:
self.printer('ERROR.')
raise Exception(response)
self.printer('done.')
return self
|
[
"def",
"_validate_login",
"(",
"self",
")",
":",
"title",
"=",
"'%s.validate_login'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# verbosity\r",
"windows_insert",
"=",
"' On windows, run in cmd.exe'",
"self",
".",
"printer",
"(",
"'Checking heroku credentials ... '",
",",
"flush",
"=",
"True",
")",
"# validate netrc exists\r",
"from",
"os",
"import",
"path",
"netrc_path",
"=",
"path",
".",
"join",
"(",
"self",
".",
"localhost",
".",
"home",
",",
"'.netrc'",
")",
"# TODO verify path exists on Windows\r",
"if",
"not",
"path",
".",
"exists",
"(",
"netrc_path",
")",
":",
"error_msg",
"=",
"'.netrc file is missing. Try: heroku login, then heroku auth:token'",
"if",
"self",
".",
"localhost",
".",
"os",
".",
"sysname",
"in",
"(",
"'Windows'",
")",
":",
"error_msg",
"+=",
"windows_insert",
"self",
".",
"printer",
"(",
"'ERROR.'",
")",
"raise",
"Exception",
"(",
"error_msg",
")",
"# replace value in netrc\r",
"netrc_text",
"=",
"self",
".",
"_update_netrc",
"(",
"netrc_path",
",",
"self",
".",
"token",
",",
"self",
".",
"email",
")",
"# verify remote access\r",
"def",
"handle_invalid",
"(",
"stdout",
",",
"proc",
")",
":",
"# define process closing helper\r",
"def",
"_close_process",
"(",
"_proc",
")",
":",
"# close process\r",
"import",
"psutil",
"process",
"=",
"psutil",
".",
"Process",
"(",
"_proc",
".",
"pid",
")",
"for",
"proc",
"in",
"process",
".",
"children",
"(",
"recursive",
"=",
"True",
")",
":",
"proc",
".",
"kill",
"(",
")",
"process",
".",
"kill",
"(",
")",
"# restore values to netrc\r",
"with",
"open",
"(",
"netrc_path",
",",
"'wt'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"netrc_text",
")",
"f",
".",
"close",
"(",
")",
"# invalid credentials\r",
"if",
"stdout",
".",
"find",
"(",
"'Invalid credentials'",
")",
">",
"-",
"1",
":",
"_close_process",
"(",
"proc",
")",
"self",
".",
"printer",
"(",
"'ERROR.'",
")",
"raise",
"Exception",
"(",
"'Permission denied. Heroku auth token is not valid.\\nTry: \"heroku login\", then \"heroku auth:token\"'",
")",
"sys_command",
"=",
"'heroku apps --json'",
"response",
"=",
"self",
".",
"_handle_command",
"(",
"sys_command",
",",
"interactive",
"=",
"handle_invalid",
",",
"handle_error",
"=",
"True",
")",
"if",
"response",
".",
"find",
"(",
"'Warning: heroku update'",
")",
">",
"-",
"1",
":",
"self",
".",
"printer",
"(",
"'WARNING: heroku update available.'",
")",
"self",
".",
"printer",
"(",
"'Try: npm install -g -U heroku\\nor see https://devcenter.heroku.com/articles/heroku-cli#staying-up-to-date'",
")",
"self",
".",
"printer",
"(",
"'Checking heroku credentials ... '",
")",
"response_lines",
"=",
"response",
".",
"splitlines",
"(",
")",
"response",
"=",
"'\\n'",
".",
"join",
"(",
"response_lines",
"[",
"1",
":",
"]",
")",
"# add list to object\r",
"import",
"json",
"try",
":",
"self",
".",
"apps",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"except",
":",
"self",
".",
"printer",
"(",
"'ERROR.'",
")",
"raise",
"Exception",
"(",
"response",
")",
"self",
".",
"printer",
"(",
"'done.'",
")",
"return",
"self"
] |
a method to validate user can access heroku account
|
[
"a",
"method",
"to",
"validate",
"user",
"can",
"access",
"heroku",
"account"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/heroku.py#L119-L185
|
244,106
|
collectiveacuity/labPack
|
labpack/platforms/heroku.py
|
herokuClient.access
|
def access(self, app_subdomain):
''' a method to validate user can access app '''
title = '%s.access' % self.__class__.__name__
# validate input
input_fields = {
'app_subdomain': app_subdomain
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# verbosity
self.printer('Checking access to "%s" subdomain ... ' % app_subdomain, flush=True)
# confirm existence of subdomain
for app in self.apps:
if app['name'] == app_subdomain:
self.subdomain = app_subdomain
break
# refresh app list and search again
if not self.subdomain:
import json
response = self._handle_command('heroku apps --json', handle_error=True)
self.apps = json.loads(response)
for app in self.apps:
if app['name'] == app_subdomain:
self.subdomain = app_subdomain
break
# check reason for failure
if not self.subdomain:
sys_command = 'heroku ps -a %s' % app_subdomain
heroku_response = self._handle_command(sys_command, handle_error=True)
if heroku_response.find('find that app') > -1:
self.printer('ERROR')
raise Exception('%s does not exist. Try: heroku create -a %s' % (app_subdomain, app_subdomain))
elif heroku_response.find('have access to the app') > -1:
self.printer('ERROR')
raise Exception('%s belongs to another account.' % app_subdomain)
else:
self.printer('ERROR')
raise Exception('Some unknown issue prevents you from accessing %s' % app_subdomain)
self.printer('done.')
return self
|
python
|
def access(self, app_subdomain):
''' a method to validate user can access app '''
title = '%s.access' % self.__class__.__name__
# validate input
input_fields = {
'app_subdomain': app_subdomain
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# verbosity
self.printer('Checking access to "%s" subdomain ... ' % app_subdomain, flush=True)
# confirm existence of subdomain
for app in self.apps:
if app['name'] == app_subdomain:
self.subdomain = app_subdomain
break
# refresh app list and search again
if not self.subdomain:
import json
response = self._handle_command('heroku apps --json', handle_error=True)
self.apps = json.loads(response)
for app in self.apps:
if app['name'] == app_subdomain:
self.subdomain = app_subdomain
break
# check reason for failure
if not self.subdomain:
sys_command = 'heroku ps -a %s' % app_subdomain
heroku_response = self._handle_command(sys_command, handle_error=True)
if heroku_response.find('find that app') > -1:
self.printer('ERROR')
raise Exception('%s does not exist. Try: heroku create -a %s' % (app_subdomain, app_subdomain))
elif heroku_response.find('have access to the app') > -1:
self.printer('ERROR')
raise Exception('%s belongs to another account.' % app_subdomain)
else:
self.printer('ERROR')
raise Exception('Some unknown issue prevents you from accessing %s' % app_subdomain)
self.printer('done.')
return self
|
[
"def",
"access",
"(",
"self",
",",
"app_subdomain",
")",
":",
"title",
"=",
"'%s.access'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate input\r",
"input_fields",
"=",
"{",
"'app_subdomain'",
":",
"app_subdomain",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# verbosity\r",
"self",
".",
"printer",
"(",
"'Checking access to \"%s\" subdomain ... '",
"%",
"app_subdomain",
",",
"flush",
"=",
"True",
")",
"# confirm existence of subdomain\r",
"for",
"app",
"in",
"self",
".",
"apps",
":",
"if",
"app",
"[",
"'name'",
"]",
"==",
"app_subdomain",
":",
"self",
".",
"subdomain",
"=",
"app_subdomain",
"break",
"# refresh app list and search again\r",
"if",
"not",
"self",
".",
"subdomain",
":",
"import",
"json",
"response",
"=",
"self",
".",
"_handle_command",
"(",
"'heroku apps --json'",
",",
"handle_error",
"=",
"True",
")",
"self",
".",
"apps",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"for",
"app",
"in",
"self",
".",
"apps",
":",
"if",
"app",
"[",
"'name'",
"]",
"==",
"app_subdomain",
":",
"self",
".",
"subdomain",
"=",
"app_subdomain",
"break",
"# check reason for failure\r",
"if",
"not",
"self",
".",
"subdomain",
":",
"sys_command",
"=",
"'heroku ps -a %s'",
"%",
"app_subdomain",
"heroku_response",
"=",
"self",
".",
"_handle_command",
"(",
"sys_command",
",",
"handle_error",
"=",
"True",
")",
"if",
"heroku_response",
".",
"find",
"(",
"'find that app'",
")",
">",
"-",
"1",
":",
"self",
".",
"printer",
"(",
"'ERROR'",
")",
"raise",
"Exception",
"(",
"'%s does not exist. Try: heroku create -a %s'",
"%",
"(",
"app_subdomain",
",",
"app_subdomain",
")",
")",
"elif",
"heroku_response",
".",
"find",
"(",
"'have access to the app'",
")",
">",
"-",
"1",
":",
"self",
".",
"printer",
"(",
"'ERROR'",
")",
"raise",
"Exception",
"(",
"'%s belongs to another account.'",
"%",
"app_subdomain",
")",
"else",
":",
"self",
".",
"printer",
"(",
"'ERROR'",
")",
"raise",
"Exception",
"(",
"'Some unknown issue prevents you from accessing %s'",
"%",
"app_subdomain",
")",
"self",
".",
"printer",
"(",
"'done.'",
")",
"return",
"self"
] |
a method to validate user can access app
|
[
"a",
"method",
"to",
"validate",
"user",
"can",
"access",
"app"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/heroku.py#L187-L237
|
244,107
|
collectiveacuity/labPack
|
labpack/platforms/heroku.py
|
herokuClient.deploy_docker
|
def deploy_docker(self, dockerfile_path, virtualbox_name='default'):
''' a method to deploy app to heroku using docker '''
title = '%s.deploy_docker' % self.__class__.__name__
# validate inputs
input_fields = {
'dockerfile_path': dockerfile_path,
'virtualbox_name': virtualbox_name
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# check app subdomain
if not self.subdomain:
raise Exception('You must access a subdomain before you can deploy to heroku. Try: %s.access()' % self.__class__.__name__)
# import dependencies
from os import path
# validate docker client
from labpack.platforms.docker import dockerClient
dockerClient(virtualbox_name, self.verbose)
# validate dockerfile
if not path.exists(dockerfile_path):
raise Exception('%s is not a valid path on local host.' % dockerfile_path)
dockerfile_root, dockerfile_node = path.split(dockerfile_path)
if dockerfile_node != 'Dockerfile':
raise Exception('heroku requires a file called Dockerfile to deploy using Docker.')
# validate container plugin
from os import devnull
from subprocess import check_output
self.printer('Checking heroku plugin requirements ... ', flush=True)
sys_command = 'heroku plugins --core'
heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8')
if heroku_plugins.find('heroku-container-registry') == -1 and heroku_plugins.find('container-registry') == -1:
sys_command = 'heroku plugins'
heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8')
if heroku_plugins.find('heroku-container-registry') == -1 and heroku_plugins.find('container-registry') == -1:
self.printer('ERROR')
raise Exception(
'heroku container registry required. Upgrade heroku-cli.')
self.printer('done.')
# verify container login
self.printer('Checking heroku container login ... ', flush=True)
sys_command = 'heroku container:login'
self._handle_command(sys_command)
self.printer('done.')
# Old Login Process (pre 2018.02.03)
# import pexpect
# try:
# child = pexpect.spawn('heroku container:login', timeout=5)
# child.expect('Email:\s?')
# child.sendline(self.email)
# i = child.expect([pexpect.EOF, pexpect.TIMEOUT])
# if i == 0:
# child.terminate()
# elif i == 1:
# child.terminate()
# raise Exception('Some unknown issue prevents Heroku from accepting credentials.\nTry first: heroku login')
# except Exception as err:
# self._check_connectivity(err)
# self.printer('done.')
# verbosity
self.printer('Building docker image ...')
# build docker image
sys_command = 'cd %s; heroku container:push web --app %s' % (dockerfile_root, self.subdomain)
self._handle_command(sys_command, print_pipe=True)
sys_command = 'cd %s; heroku container:release web --app %s' % (dockerfile_root, self.subdomain)
self._handle_command(sys_command, print_pipe=True)
self.printer('Deployment complete.')
return True
|
python
|
def deploy_docker(self, dockerfile_path, virtualbox_name='default'):
''' a method to deploy app to heroku using docker '''
title = '%s.deploy_docker' % self.__class__.__name__
# validate inputs
input_fields = {
'dockerfile_path': dockerfile_path,
'virtualbox_name': virtualbox_name
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# check app subdomain
if not self.subdomain:
raise Exception('You must access a subdomain before you can deploy to heroku. Try: %s.access()' % self.__class__.__name__)
# import dependencies
from os import path
# validate docker client
from labpack.platforms.docker import dockerClient
dockerClient(virtualbox_name, self.verbose)
# validate dockerfile
if not path.exists(dockerfile_path):
raise Exception('%s is not a valid path on local host.' % dockerfile_path)
dockerfile_root, dockerfile_node = path.split(dockerfile_path)
if dockerfile_node != 'Dockerfile':
raise Exception('heroku requires a file called Dockerfile to deploy using Docker.')
# validate container plugin
from os import devnull
from subprocess import check_output
self.printer('Checking heroku plugin requirements ... ', flush=True)
sys_command = 'heroku plugins --core'
heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8')
if heroku_plugins.find('heroku-container-registry') == -1 and heroku_plugins.find('container-registry') == -1:
sys_command = 'heroku plugins'
heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8')
if heroku_plugins.find('heroku-container-registry') == -1 and heroku_plugins.find('container-registry') == -1:
self.printer('ERROR')
raise Exception(
'heroku container registry required. Upgrade heroku-cli.')
self.printer('done.')
# verify container login
self.printer('Checking heroku container login ... ', flush=True)
sys_command = 'heroku container:login'
self._handle_command(sys_command)
self.printer('done.')
# Old Login Process (pre 2018.02.03)
# import pexpect
# try:
# child = pexpect.spawn('heroku container:login', timeout=5)
# child.expect('Email:\s?')
# child.sendline(self.email)
# i = child.expect([pexpect.EOF, pexpect.TIMEOUT])
# if i == 0:
# child.terminate()
# elif i == 1:
# child.terminate()
# raise Exception('Some unknown issue prevents Heroku from accepting credentials.\nTry first: heroku login')
# except Exception as err:
# self._check_connectivity(err)
# self.printer('done.')
# verbosity
self.printer('Building docker image ...')
# build docker image
sys_command = 'cd %s; heroku container:push web --app %s' % (dockerfile_root, self.subdomain)
self._handle_command(sys_command, print_pipe=True)
sys_command = 'cd %s; heroku container:release web --app %s' % (dockerfile_root, self.subdomain)
self._handle_command(sys_command, print_pipe=True)
self.printer('Deployment complete.')
return True
|
[
"def",
"deploy_docker",
"(",
"self",
",",
"dockerfile_path",
",",
"virtualbox_name",
"=",
"'default'",
")",
":",
"title",
"=",
"'%s.deploy_docker'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs\r",
"input_fields",
"=",
"{",
"'dockerfile_path'",
":",
"dockerfile_path",
",",
"'virtualbox_name'",
":",
"virtualbox_name",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# check app subdomain\r",
"if",
"not",
"self",
".",
"subdomain",
":",
"raise",
"Exception",
"(",
"'You must access a subdomain before you can deploy to heroku. Try: %s.access()'",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"# import dependencies\r",
"from",
"os",
"import",
"path",
"# validate docker client\r",
"from",
"labpack",
".",
"platforms",
".",
"docker",
"import",
"dockerClient",
"dockerClient",
"(",
"virtualbox_name",
",",
"self",
".",
"verbose",
")",
"# validate dockerfile\r",
"if",
"not",
"path",
".",
"exists",
"(",
"dockerfile_path",
")",
":",
"raise",
"Exception",
"(",
"'%s is not a valid path on local host.'",
"%",
"dockerfile_path",
")",
"dockerfile_root",
",",
"dockerfile_node",
"=",
"path",
".",
"split",
"(",
"dockerfile_path",
")",
"if",
"dockerfile_node",
"!=",
"'Dockerfile'",
":",
"raise",
"Exception",
"(",
"'heroku requires a file called Dockerfile to deploy using Docker.'",
")",
"# validate container plugin\r",
"from",
"os",
"import",
"devnull",
"from",
"subprocess",
"import",
"check_output",
"self",
".",
"printer",
"(",
"'Checking heroku plugin requirements ... '",
",",
"flush",
"=",
"True",
")",
"sys_command",
"=",
"'heroku plugins --core'",
"heroku_plugins",
"=",
"check_output",
"(",
"sys_command",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"open",
"(",
"devnull",
",",
"'wb'",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"heroku_plugins",
".",
"find",
"(",
"'heroku-container-registry'",
")",
"==",
"-",
"1",
"and",
"heroku_plugins",
".",
"find",
"(",
"'container-registry'",
")",
"==",
"-",
"1",
":",
"sys_command",
"=",
"'heroku plugins'",
"heroku_plugins",
"=",
"check_output",
"(",
"sys_command",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"open",
"(",
"devnull",
",",
"'wb'",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"heroku_plugins",
".",
"find",
"(",
"'heroku-container-registry'",
")",
"==",
"-",
"1",
"and",
"heroku_plugins",
".",
"find",
"(",
"'container-registry'",
")",
"==",
"-",
"1",
":",
"self",
".",
"printer",
"(",
"'ERROR'",
")",
"raise",
"Exception",
"(",
"'heroku container registry required. Upgrade heroku-cli.'",
")",
"self",
".",
"printer",
"(",
"'done.'",
")",
"# verify container login\r",
"self",
".",
"printer",
"(",
"'Checking heroku container login ... '",
",",
"flush",
"=",
"True",
")",
"sys_command",
"=",
"'heroku container:login'",
"self",
".",
"_handle_command",
"(",
"sys_command",
")",
"self",
".",
"printer",
"(",
"'done.'",
")",
"# Old Login Process (pre 2018.02.03)\r",
"# import pexpect\r",
"# try:\r",
"# child = pexpect.spawn('heroku container:login', timeout=5)\r",
"# child.expect('Email:\\s?')\r",
"# child.sendline(self.email)\r",
"# i = child.expect([pexpect.EOF, pexpect.TIMEOUT])\r",
"# if i == 0:\r",
"# child.terminate()\r",
"# elif i == 1:\r",
"# child.terminate()\r",
"# raise Exception('Some unknown issue prevents Heroku from accepting credentials.\\nTry first: heroku login')\r",
"# except Exception as err:\r",
"# self._check_connectivity(err)\r",
"# self.printer('done.')\r",
"# verbosity\r",
"self",
".",
"printer",
"(",
"'Building docker image ...'",
")",
"# build docker image\r",
"sys_command",
"=",
"'cd %s; heroku container:push web --app %s'",
"%",
"(",
"dockerfile_root",
",",
"self",
".",
"subdomain",
")",
"self",
".",
"_handle_command",
"(",
"sys_command",
",",
"print_pipe",
"=",
"True",
")",
"sys_command",
"=",
"'cd %s; heroku container:release web --app %s'",
"%",
"(",
"dockerfile_root",
",",
"self",
".",
"subdomain",
")",
"self",
".",
"_handle_command",
"(",
"sys_command",
",",
"print_pipe",
"=",
"True",
")",
"self",
".",
"printer",
"(",
"'Deployment complete.'",
")",
"return",
"True"
] |
a method to deploy app to heroku using docker
|
[
"a",
"method",
"to",
"deploy",
"app",
"to",
"heroku",
"using",
"docker"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/heroku.py#L239-L319
|
244,108
|
quasipedia/swaggery
|
swaggery/appinit.py
|
init
|
def init():
'''Initialise a WSGI application to be loaded by uWSGI.'''
# Load values from config file
config_file = os.path.realpath(os.path.join(os.getcwd(), 'swaggery.ini'))
config = configparser.RawConfigParser(allow_no_value=True)
config.read(config_file)
log_level = config.get('application', 'logging_level').upper()
api_dirs = list(config['apis'])
do_checks = config.get('application',
'disable_boot_checks').lower() == 'false'
# Set logging level
log.setLevel(getattr(logging, log_level))
log.debug('Log level set to {}'.format(log_level))
# Bootstrap application
log.debug('Exploring directories: {}'.format(api_dirs))
application = Swaggery(api_dirs=api_dirs, do_checks=do_checks)
return application
|
python
|
def init():
'''Initialise a WSGI application to be loaded by uWSGI.'''
# Load values from config file
config_file = os.path.realpath(os.path.join(os.getcwd(), 'swaggery.ini'))
config = configparser.RawConfigParser(allow_no_value=True)
config.read(config_file)
log_level = config.get('application', 'logging_level').upper()
api_dirs = list(config['apis'])
do_checks = config.get('application',
'disable_boot_checks').lower() == 'false'
# Set logging level
log.setLevel(getattr(logging, log_level))
log.debug('Log level set to {}'.format(log_level))
# Bootstrap application
log.debug('Exploring directories: {}'.format(api_dirs))
application = Swaggery(api_dirs=api_dirs, do_checks=do_checks)
return application
|
[
"def",
"init",
"(",
")",
":",
"# Load values from config file",
"config_file",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'swaggery.ini'",
")",
")",
"config",
"=",
"configparser",
".",
"RawConfigParser",
"(",
"allow_no_value",
"=",
"True",
")",
"config",
".",
"read",
"(",
"config_file",
")",
"log_level",
"=",
"config",
".",
"get",
"(",
"'application'",
",",
"'logging_level'",
")",
".",
"upper",
"(",
")",
"api_dirs",
"=",
"list",
"(",
"config",
"[",
"'apis'",
"]",
")",
"do_checks",
"=",
"config",
".",
"get",
"(",
"'application'",
",",
"'disable_boot_checks'",
")",
".",
"lower",
"(",
")",
"==",
"'false'",
"# Set logging level",
"log",
".",
"setLevel",
"(",
"getattr",
"(",
"logging",
",",
"log_level",
")",
")",
"log",
".",
"debug",
"(",
"'Log level set to {}'",
".",
"format",
"(",
"log_level",
")",
")",
"# Bootstrap application",
"log",
".",
"debug",
"(",
"'Exploring directories: {}'",
".",
"format",
"(",
"api_dirs",
")",
")",
"application",
"=",
"Swaggery",
"(",
"api_dirs",
"=",
"api_dirs",
",",
"do_checks",
"=",
"do_checks",
")",
"return",
"application"
] |
Initialise a WSGI application to be loaded by uWSGI.
|
[
"Initialise",
"a",
"WSGI",
"application",
"to",
"be",
"loaded",
"by",
"uWSGI",
"."
] |
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
|
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/appinit.py#L10-L26
|
244,109
|
ravenac95/lxc4u
|
lxc4u/service.py
|
LXCService.list_names
|
def list_names(cls):
"""Lists all known LXC names"""
response = subwrap.run(['lxc-ls'])
output = response.std_out
return map(str.strip, output.splitlines())
|
python
|
def list_names(cls):
"""Lists all known LXC names"""
response = subwrap.run(['lxc-ls'])
output = response.std_out
return map(str.strip, output.splitlines())
|
[
"def",
"list_names",
"(",
"cls",
")",
":",
"response",
"=",
"subwrap",
".",
"run",
"(",
"[",
"'lxc-ls'",
"]",
")",
"output",
"=",
"response",
".",
"std_out",
"return",
"map",
"(",
"str",
".",
"strip",
",",
"output",
".",
"splitlines",
"(",
")",
")"
] |
Lists all known LXC names
|
[
"Lists",
"all",
"known",
"LXC",
"names"
] |
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
|
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/service.py#L18-L22
|
244,110
|
ravenac95/lxc4u
|
lxc4u/service.py
|
LXCService.info
|
def info(cls, name, get_state=True, get_pid=True):
"""Retrieves and parses info about an LXC"""
# Run lxc-info quietly
command = ['lxc-info', '-n', name]
response = subwrap.run(command)
lines = map(split_info_line, response.std_out.splitlines())
return dict(lines)
|
python
|
def info(cls, name, get_state=True, get_pid=True):
"""Retrieves and parses info about an LXC"""
# Run lxc-info quietly
command = ['lxc-info', '-n', name]
response = subwrap.run(command)
lines = map(split_info_line, response.std_out.splitlines())
return dict(lines)
|
[
"def",
"info",
"(",
"cls",
",",
"name",
",",
"get_state",
"=",
"True",
",",
"get_pid",
"=",
"True",
")",
":",
"# Run lxc-info quietly",
"command",
"=",
"[",
"'lxc-info'",
",",
"'-n'",
",",
"name",
"]",
"response",
"=",
"subwrap",
".",
"run",
"(",
"command",
")",
"lines",
"=",
"map",
"(",
"split_info_line",
",",
"response",
".",
"std_out",
".",
"splitlines",
"(",
")",
")",
"return",
"dict",
"(",
"lines",
")"
] |
Retrieves and parses info about an LXC
|
[
"Retrieves",
"and",
"parses",
"info",
"about",
"an",
"LXC"
] |
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
|
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/service.py#L62-L68
|
244,111
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
WriteServiceModule.getClientModuleName
|
def getClientModuleName(self):
"""client module name.
"""
name = GetModuleBaseNameFromWSDL(self._wsdl)
if not name:
raise WsdlGeneratorError, 'could not determine a service name'
if self.client_module_suffix is None:
return name
return '%s%s' %(name, self.client_module_suffix)
|
python
|
def getClientModuleName(self):
"""client module name.
"""
name = GetModuleBaseNameFromWSDL(self._wsdl)
if not name:
raise WsdlGeneratorError, 'could not determine a service name'
if self.client_module_suffix is None:
return name
return '%s%s' %(name, self.client_module_suffix)
|
[
"def",
"getClientModuleName",
"(",
"self",
")",
":",
"name",
"=",
"GetModuleBaseNameFromWSDL",
"(",
"self",
".",
"_wsdl",
")",
"if",
"not",
"name",
":",
"raise",
"WsdlGeneratorError",
",",
"'could not determine a service name'",
"if",
"self",
".",
"client_module_suffix",
"is",
"None",
":",
"return",
"name",
"return",
"'%s%s'",
"%",
"(",
"name",
",",
"self",
".",
"client_module_suffix",
")"
] |
client module name.
|
[
"client",
"module",
"name",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L74-L84
|
244,112
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
WriteServiceModule.getTypesModuleName
|
def getTypesModuleName(self):
"""types module name.
"""
if self.types_module_name is not None:
return self.types_module_name
name = GetModuleBaseNameFromWSDL(self._wsdl)
if not name:
raise WsdlGeneratorError, 'could not determine a service name'
if self.types_module_suffix is None:
return name
return '%s%s' %(name, self.types_module_suffix)
|
python
|
def getTypesModuleName(self):
"""types module name.
"""
if self.types_module_name is not None:
return self.types_module_name
name = GetModuleBaseNameFromWSDL(self._wsdl)
if not name:
raise WsdlGeneratorError, 'could not determine a service name'
if self.types_module_suffix is None:
return name
return '%s%s' %(name, self.types_module_suffix)
|
[
"def",
"getTypesModuleName",
"(",
"self",
")",
":",
"if",
"self",
".",
"types_module_name",
"is",
"not",
"None",
":",
"return",
"self",
".",
"types_module_name",
"name",
"=",
"GetModuleBaseNameFromWSDL",
"(",
"self",
".",
"_wsdl",
")",
"if",
"not",
"name",
":",
"raise",
"WsdlGeneratorError",
",",
"'could not determine a service name'",
"if",
"self",
".",
"types_module_suffix",
"is",
"None",
":",
"return",
"name",
"return",
"'%s%s'",
"%",
"(",
"name",
",",
"self",
".",
"types_module_suffix",
")"
] |
types module name.
|
[
"types",
"module",
"name",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L102-L115
|
244,113
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
WriteServiceModule.gatherNamespaces
|
def gatherNamespaces(self):
'''This method must execute once.. Grab all schemas
representing each targetNamespace.
'''
if self.usedNamespaces is not None:
return
self.logger.debug('gatherNamespaces')
self.usedNamespaces = {}
# Add all schemas defined in wsdl
# to used namespace and to the Alias dict
for schema in self._wsdl.types.values():
tns = schema.getTargetNamespace()
self.logger.debug('Register schema(%s) -- TNS(%s)'\
%(_get_idstr(schema), tns),)
if self.usedNamespaces.has_key(tns) is False:
self.usedNamespaces[tns] = []
self.usedNamespaces[tns].append(schema)
NAD.add(tns)
# Add all xsd:import schema instances
# to used namespace and to the Alias dict
for k,v in SchemaReader.namespaceToSchema.items():
self.logger.debug('Register schema(%s) -- TNS(%s)'\
%(_get_idstr(v), k),)
if self.usedNamespaces.has_key(k) is False:
self.usedNamespaces[k] = []
self.usedNamespaces[k].append(v)
NAD.add(k)
|
python
|
def gatherNamespaces(self):
'''This method must execute once.. Grab all schemas
representing each targetNamespace.
'''
if self.usedNamespaces is not None:
return
self.logger.debug('gatherNamespaces')
self.usedNamespaces = {}
# Add all schemas defined in wsdl
# to used namespace and to the Alias dict
for schema in self._wsdl.types.values():
tns = schema.getTargetNamespace()
self.logger.debug('Register schema(%s) -- TNS(%s)'\
%(_get_idstr(schema), tns),)
if self.usedNamespaces.has_key(tns) is False:
self.usedNamespaces[tns] = []
self.usedNamespaces[tns].append(schema)
NAD.add(tns)
# Add all xsd:import schema instances
# to used namespace and to the Alias dict
for k,v in SchemaReader.namespaceToSchema.items():
self.logger.debug('Register schema(%s) -- TNS(%s)'\
%(_get_idstr(v), k),)
if self.usedNamespaces.has_key(k) is False:
self.usedNamespaces[k] = []
self.usedNamespaces[k].append(v)
NAD.add(k)
|
[
"def",
"gatherNamespaces",
"(",
"self",
")",
":",
"if",
"self",
".",
"usedNamespaces",
"is",
"not",
"None",
":",
"return",
"self",
".",
"logger",
".",
"debug",
"(",
"'gatherNamespaces'",
")",
"self",
".",
"usedNamespaces",
"=",
"{",
"}",
"# Add all schemas defined in wsdl",
"# to used namespace and to the Alias dict",
"for",
"schema",
"in",
"self",
".",
"_wsdl",
".",
"types",
".",
"values",
"(",
")",
":",
"tns",
"=",
"schema",
".",
"getTargetNamespace",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Register schema(%s) -- TNS(%s)'",
"%",
"(",
"_get_idstr",
"(",
"schema",
")",
",",
"tns",
")",
",",
")",
"if",
"self",
".",
"usedNamespaces",
".",
"has_key",
"(",
"tns",
")",
"is",
"False",
":",
"self",
".",
"usedNamespaces",
"[",
"tns",
"]",
"=",
"[",
"]",
"self",
".",
"usedNamespaces",
"[",
"tns",
"]",
".",
"append",
"(",
"schema",
")",
"NAD",
".",
"add",
"(",
"tns",
")",
"# Add all xsd:import schema instances",
"# to used namespace and to the Alias dict",
"for",
"k",
",",
"v",
"in",
"SchemaReader",
".",
"namespaceToSchema",
".",
"items",
"(",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Register schema(%s) -- TNS(%s)'",
"%",
"(",
"_get_idstr",
"(",
"v",
")",
",",
"k",
")",
",",
")",
"if",
"self",
".",
"usedNamespaces",
".",
"has_key",
"(",
"k",
")",
"is",
"False",
":",
"self",
".",
"usedNamespaces",
"[",
"k",
"]",
"=",
"[",
"]",
"self",
".",
"usedNamespaces",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"NAD",
".",
"add",
"(",
"k",
")"
] |
This method must execute once.. Grab all schemas
representing each targetNamespace.
|
[
"This",
"method",
"must",
"execute",
"once",
"..",
"Grab",
"all",
"schemas",
"representing",
"each",
"targetNamespace",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L146-L175
|
244,114
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
WriteServiceModule.writeTypes
|
def writeTypes(self, fd):
"""write out types module to file descriptor.
"""
print >>fd, '#'*50
print >>fd, '# file: %s.py' %self.getTypesModuleName()
print >>fd, '#'
print >>fd, '# schema types generated by "%s"' %self.__class__
print >>fd, '# %s' %' '.join(sys.argv)
print >>fd, '#'
print >>fd, '#'*50
print >>fd, TypesHeaderContainer()
self.gatherNamespaces()
for l in self.usedNamespaces.values():
sd = SchemaDescription(do_extended=self.do_extended,
extPyClasses=self.extPyClasses)
for schema in l:
sd.fromSchema(schema)
sd.write(fd)
|
python
|
def writeTypes(self, fd):
"""write out types module to file descriptor.
"""
print >>fd, '#'*50
print >>fd, '# file: %s.py' %self.getTypesModuleName()
print >>fd, '#'
print >>fd, '# schema types generated by "%s"' %self.__class__
print >>fd, '# %s' %' '.join(sys.argv)
print >>fd, '#'
print >>fd, '#'*50
print >>fd, TypesHeaderContainer()
self.gatherNamespaces()
for l in self.usedNamespaces.values():
sd = SchemaDescription(do_extended=self.do_extended,
extPyClasses=self.extPyClasses)
for schema in l:
sd.fromSchema(schema)
sd.write(fd)
|
[
"def",
"writeTypes",
"(",
"self",
",",
"fd",
")",
":",
"print",
">>",
"fd",
",",
"'#'",
"*",
"50",
"print",
">>",
"fd",
",",
"'# file: %s.py'",
"%",
"self",
".",
"getTypesModuleName",
"(",
")",
"print",
">>",
"fd",
",",
"'#'",
"print",
">>",
"fd",
",",
"'# schema types generated by \"%s\"'",
"%",
"self",
".",
"__class__",
"print",
">>",
"fd",
",",
"'# %s'",
"%",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"print",
">>",
"fd",
",",
"'#'",
"print",
">>",
"fd",
",",
"'#'",
"*",
"50",
"print",
">>",
"fd",
",",
"TypesHeaderContainer",
"(",
")",
"self",
".",
"gatherNamespaces",
"(",
")",
"for",
"l",
"in",
"self",
".",
"usedNamespaces",
".",
"values",
"(",
")",
":",
"sd",
"=",
"SchemaDescription",
"(",
"do_extended",
"=",
"self",
".",
"do_extended",
",",
"extPyClasses",
"=",
"self",
".",
"extPyClasses",
")",
"for",
"schema",
"in",
"l",
":",
"sd",
".",
"fromSchema",
"(",
"schema",
")",
"sd",
".",
"write",
"(",
"fd",
")"
] |
write out types module to file descriptor.
|
[
"write",
"out",
"types",
"module",
"to",
"file",
"descriptor",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L215-L233
|
244,115
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
SchemaDescription.fromSchema
|
def fromSchema(self, schema):
''' Can be called multiple times, but will not redefine a
previously defined type definition or element declaration.
'''
ns = schema.getTargetNamespace()
assert self.targetNamespace is None or self.targetNamespace == ns,\
'SchemaDescription instance represents %s, not %s'\
%(self.targetNamespace, ns)
if self.targetNamespace is None:
self.targetNamespace = ns
self.classHead.ns = self.classFoot.ns = ns
for item in [t for t in schema.types if t.getAttributeName() not in self.__types]:
self.__types.append(item.getAttributeName())
self.items.append(TypeWriter(do_extended=self.do_extended, extPyClasses=self.extPyClasses))
self.items[-1].fromSchemaItem(item)
for item in [e for e in schema.elements if e.getAttributeName() not in self.__elements]:
self.__elements.append(item.getAttributeName())
self.items.append(ElementWriter(do_extended=self.do_extended))
self.items[-1].fromSchemaItem(item)
|
python
|
def fromSchema(self, schema):
''' Can be called multiple times, but will not redefine a
previously defined type definition or element declaration.
'''
ns = schema.getTargetNamespace()
assert self.targetNamespace is None or self.targetNamespace == ns,\
'SchemaDescription instance represents %s, not %s'\
%(self.targetNamespace, ns)
if self.targetNamespace is None:
self.targetNamespace = ns
self.classHead.ns = self.classFoot.ns = ns
for item in [t for t in schema.types if t.getAttributeName() not in self.__types]:
self.__types.append(item.getAttributeName())
self.items.append(TypeWriter(do_extended=self.do_extended, extPyClasses=self.extPyClasses))
self.items[-1].fromSchemaItem(item)
for item in [e for e in schema.elements if e.getAttributeName() not in self.__elements]:
self.__elements.append(item.getAttributeName())
self.items.append(ElementWriter(do_extended=self.do_extended))
self.items[-1].fromSchemaItem(item)
|
[
"def",
"fromSchema",
"(",
"self",
",",
"schema",
")",
":",
"ns",
"=",
"schema",
".",
"getTargetNamespace",
"(",
")",
"assert",
"self",
".",
"targetNamespace",
"is",
"None",
"or",
"self",
".",
"targetNamespace",
"==",
"ns",
",",
"'SchemaDescription instance represents %s, not %s'",
"%",
"(",
"self",
".",
"targetNamespace",
",",
"ns",
")",
"if",
"self",
".",
"targetNamespace",
"is",
"None",
":",
"self",
".",
"targetNamespace",
"=",
"ns",
"self",
".",
"classHead",
".",
"ns",
"=",
"self",
".",
"classFoot",
".",
"ns",
"=",
"ns",
"for",
"item",
"in",
"[",
"t",
"for",
"t",
"in",
"schema",
".",
"types",
"if",
"t",
".",
"getAttributeName",
"(",
")",
"not",
"in",
"self",
".",
"__types",
"]",
":",
"self",
".",
"__types",
".",
"append",
"(",
"item",
".",
"getAttributeName",
"(",
")",
")",
"self",
".",
"items",
".",
"append",
"(",
"TypeWriter",
"(",
"do_extended",
"=",
"self",
".",
"do_extended",
",",
"extPyClasses",
"=",
"self",
".",
"extPyClasses",
")",
")",
"self",
".",
"items",
"[",
"-",
"1",
"]",
".",
"fromSchemaItem",
"(",
"item",
")",
"for",
"item",
"in",
"[",
"e",
"for",
"e",
"in",
"schema",
".",
"elements",
"if",
"e",
".",
"getAttributeName",
"(",
")",
"not",
"in",
"self",
".",
"__elements",
"]",
":",
"self",
".",
"__elements",
".",
"append",
"(",
"item",
".",
"getAttributeName",
"(",
")",
")",
"self",
".",
"items",
".",
"append",
"(",
"ElementWriter",
"(",
"do_extended",
"=",
"self",
".",
"do_extended",
")",
")",
"self",
".",
"items",
"[",
"-",
"1",
"]",
".",
"fromSchemaItem",
"(",
"item",
")"
] |
Can be called multiple times, but will not redefine a
previously defined type definition or element declaration.
|
[
"Can",
"be",
"called",
"multiple",
"times",
"but",
"will",
"not",
"redefine",
"a",
"previously",
"defined",
"type",
"definition",
"or",
"element",
"declaration",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L401-L422
|
244,116
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
SchemaDescription.write
|
def write(self, fd):
"""write out to file descriptor.
"""
print >>fd, self.classHead
for t in self.items:
print >>fd, t
print >>fd, self.classFoot
|
python
|
def write(self, fd):
"""write out to file descriptor.
"""
print >>fd, self.classHead
for t in self.items:
print >>fd, t
print >>fd, self.classFoot
|
[
"def",
"write",
"(",
"self",
",",
"fd",
")",
":",
"print",
">>",
"fd",
",",
"self",
".",
"classHead",
"for",
"t",
"in",
"self",
".",
"items",
":",
"print",
">>",
"fd",
",",
"t",
"print",
">>",
"fd",
",",
"self",
".",
"classFoot"
] |
write out to file descriptor.
|
[
"write",
"out",
"to",
"file",
"descriptor",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L430-L436
|
244,117
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/wsdl2python.py
|
ElementWriter.fromSchemaItem
|
def fromSchemaItem(self, item):
"""set up global elements.
"""
if item.isElement() is False or item.isLocal() is True:
raise TypeError, 'expecting global element declaration: %s' %item.getItemTrace()
local = False
qName = item.getAttribute('type')
if not qName:
etp = item.content
local = True
else:
etp = item.getTypeDefinition('type')
if etp is None:
if local is True:
self.content = ElementLocalComplexTypeContainer(do_extended=self.do_extended)
else:
self.content = ElementSimpleTypeContainer()
elif etp.isLocal() is False:
self.content = ElementGlobalDefContainer()
elif etp.isSimple() is True:
self.content = ElementLocalSimpleTypeContainer()
elif etp.isComplex():
self.content = ElementLocalComplexTypeContainer(do_extended=self.do_extended)
else:
raise Wsdl2PythonError, "Unknown element declaration: %s" %item.getItemTrace()
self.logger.debug('ElementWriter setUp container "%r", Schema Item "%s"' %(
self.content, item.getItemTrace()))
self.content.setUp(item)
|
python
|
def fromSchemaItem(self, item):
"""set up global elements.
"""
if item.isElement() is False or item.isLocal() is True:
raise TypeError, 'expecting global element declaration: %s' %item.getItemTrace()
local = False
qName = item.getAttribute('type')
if not qName:
etp = item.content
local = True
else:
etp = item.getTypeDefinition('type')
if etp is None:
if local is True:
self.content = ElementLocalComplexTypeContainer(do_extended=self.do_extended)
else:
self.content = ElementSimpleTypeContainer()
elif etp.isLocal() is False:
self.content = ElementGlobalDefContainer()
elif etp.isSimple() is True:
self.content = ElementLocalSimpleTypeContainer()
elif etp.isComplex():
self.content = ElementLocalComplexTypeContainer(do_extended=self.do_extended)
else:
raise Wsdl2PythonError, "Unknown element declaration: %s" %item.getItemTrace()
self.logger.debug('ElementWriter setUp container "%r", Schema Item "%s"' %(
self.content, item.getItemTrace()))
self.content.setUp(item)
|
[
"def",
"fromSchemaItem",
"(",
"self",
",",
"item",
")",
":",
"if",
"item",
".",
"isElement",
"(",
")",
"is",
"False",
"or",
"item",
".",
"isLocal",
"(",
")",
"is",
"True",
":",
"raise",
"TypeError",
",",
"'expecting global element declaration: %s'",
"%",
"item",
".",
"getItemTrace",
"(",
")",
"local",
"=",
"False",
"qName",
"=",
"item",
".",
"getAttribute",
"(",
"'type'",
")",
"if",
"not",
"qName",
":",
"etp",
"=",
"item",
".",
"content",
"local",
"=",
"True",
"else",
":",
"etp",
"=",
"item",
".",
"getTypeDefinition",
"(",
"'type'",
")",
"if",
"etp",
"is",
"None",
":",
"if",
"local",
"is",
"True",
":",
"self",
".",
"content",
"=",
"ElementLocalComplexTypeContainer",
"(",
"do_extended",
"=",
"self",
".",
"do_extended",
")",
"else",
":",
"self",
".",
"content",
"=",
"ElementSimpleTypeContainer",
"(",
")",
"elif",
"etp",
".",
"isLocal",
"(",
")",
"is",
"False",
":",
"self",
".",
"content",
"=",
"ElementGlobalDefContainer",
"(",
")",
"elif",
"etp",
".",
"isSimple",
"(",
")",
"is",
"True",
":",
"self",
".",
"content",
"=",
"ElementLocalSimpleTypeContainer",
"(",
")",
"elif",
"etp",
".",
"isComplex",
"(",
")",
":",
"self",
".",
"content",
"=",
"ElementLocalComplexTypeContainer",
"(",
"do_extended",
"=",
"self",
".",
"do_extended",
")",
"else",
":",
"raise",
"Wsdl2PythonError",
",",
"\"Unknown element declaration: %s\"",
"%",
"item",
".",
"getItemTrace",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'ElementWriter setUp container \"%r\", Schema Item \"%s\"'",
"%",
"(",
"self",
".",
"content",
",",
"item",
".",
"getItemTrace",
"(",
")",
")",
")",
"self",
".",
"content",
".",
"setUp",
"(",
"item",
")"
] |
set up global elements.
|
[
"set",
"up",
"global",
"elements",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2python.py#L462-L493
|
244,118
|
yougov/yg.lockfile
|
yg/lockfile/__init__.py
|
LockBase.acquire
|
def acquire(self):
"""
Attempt to acquire the lock every `delay` seconds until the
lock is acquired or until `timeout` has expired.
Raises FileLockTimeout if the timeout is exceeded.
Errors opening the lock file (other than if it exists) are
passed through.
"""
self.lock = retry_call(
self._attempt,
retries=float('inf'),
trap=zc.lockfile.LockError,
cleanup=functools.partial(self._check_timeout, timing.Stopwatch()),
)
|
python
|
def acquire(self):
"""
Attempt to acquire the lock every `delay` seconds until the
lock is acquired or until `timeout` has expired.
Raises FileLockTimeout if the timeout is exceeded.
Errors opening the lock file (other than if it exists) are
passed through.
"""
self.lock = retry_call(
self._attempt,
retries=float('inf'),
trap=zc.lockfile.LockError,
cleanup=functools.partial(self._check_timeout, timing.Stopwatch()),
)
|
[
"def",
"acquire",
"(",
"self",
")",
":",
"self",
".",
"lock",
"=",
"retry_call",
"(",
"self",
".",
"_attempt",
",",
"retries",
"=",
"float",
"(",
"'inf'",
")",
",",
"trap",
"=",
"zc",
".",
"lockfile",
".",
"LockError",
",",
"cleanup",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"_check_timeout",
",",
"timing",
".",
"Stopwatch",
"(",
")",
")",
",",
")"
] |
Attempt to acquire the lock every `delay` seconds until the
lock is acquired or until `timeout` has expired.
Raises FileLockTimeout if the timeout is exceeded.
Errors opening the lock file (other than if it exists) are
passed through.
|
[
"Attempt",
"to",
"acquire",
"the",
"lock",
"every",
"delay",
"seconds",
"until",
"the",
"lock",
"is",
"acquired",
"or",
"until",
"timeout",
"has",
"expired",
"."
] |
e6bf1e5e6a9aedc657b1fcf5601693da50744cfe
|
https://github.com/yougov/yg.lockfile/blob/e6bf1e5e6a9aedc657b1fcf5601693da50744cfe/yg/lockfile/__init__.py#L67-L82
|
244,119
|
yougov/yg.lockfile
|
yg/lockfile/__init__.py
|
LockBase.release
|
def release(self):
"""
Release the lock and cleanup
"""
lock = vars(self).pop('lock', missing)
lock is not missing and self._release(lock)
|
python
|
def release(self):
"""
Release the lock and cleanup
"""
lock = vars(self).pop('lock', missing)
lock is not missing and self._release(lock)
|
[
"def",
"release",
"(",
"self",
")",
":",
"lock",
"=",
"vars",
"(",
"self",
")",
".",
"pop",
"(",
"'lock'",
",",
"missing",
")",
"lock",
"is",
"not",
"missing",
"and",
"self",
".",
"_release",
"(",
"lock",
")"
] |
Release the lock and cleanup
|
[
"Release",
"the",
"lock",
"and",
"cleanup"
] |
e6bf1e5e6a9aedc657b1fcf5601693da50744cfe
|
https://github.com/yougov/yg.lockfile/blob/e6bf1e5e6a9aedc657b1fcf5601693da50744cfe/yg/lockfile/__init__.py#L84-L89
|
244,120
|
praekelt/panya
|
panya/generic/views.py
|
GenericBase._resolve_view_params
|
def _resolve_view_params(self, request, defaults, *args, **kwargs):
"""
Resolves view params with least ammount of resistance.
Firstly check for params on urls passed args, then on class init args or members,
and lastly on class get methods .
"""
params = copy.copy(defaults)
params.update(self.params)
params.update(kwargs)
resolved_params = {}
extra_context = {}
for key in params:
# grab from provided params.
value = params[key]
# otherwise grab from existing params
if value == None:
value = self.params[key] if self.params.has_key(key) else None
# otherwise grab from class method
if value == None:
value = getattr(self, 'get_%s' % key)(request, *args, **kwargs) if getattr(self, 'get_%s' % key, None) else None
if key in defaults:
resolved_params[key] = value
else:
extra_context[key] = value
if extra_context:
try:
resolved_params['extra_context'].update(extra_context)
except AttributeError:
resolved_params['extra_context'] = extra_context
return resolved_params
|
python
|
def _resolve_view_params(self, request, defaults, *args, **kwargs):
"""
Resolves view params with least ammount of resistance.
Firstly check for params on urls passed args, then on class init args or members,
and lastly on class get methods .
"""
params = copy.copy(defaults)
params.update(self.params)
params.update(kwargs)
resolved_params = {}
extra_context = {}
for key in params:
# grab from provided params.
value = params[key]
# otherwise grab from existing params
if value == None:
value = self.params[key] if self.params.has_key(key) else None
# otherwise grab from class method
if value == None:
value = getattr(self, 'get_%s' % key)(request, *args, **kwargs) if getattr(self, 'get_%s' % key, None) else None
if key in defaults:
resolved_params[key] = value
else:
extra_context[key] = value
if extra_context:
try:
resolved_params['extra_context'].update(extra_context)
except AttributeError:
resolved_params['extra_context'] = extra_context
return resolved_params
|
[
"def",
"_resolve_view_params",
"(",
"self",
",",
"request",
",",
"defaults",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"copy",
".",
"copy",
"(",
"defaults",
")",
"params",
".",
"update",
"(",
"self",
".",
"params",
")",
"params",
".",
"update",
"(",
"kwargs",
")",
"resolved_params",
"=",
"{",
"}",
"extra_context",
"=",
"{",
"}",
"for",
"key",
"in",
"params",
":",
"# grab from provided params. ",
"value",
"=",
"params",
"[",
"key",
"]",
"# otherwise grab from existing params",
"if",
"value",
"==",
"None",
":",
"value",
"=",
"self",
".",
"params",
"[",
"key",
"]",
"if",
"self",
".",
"params",
".",
"has_key",
"(",
"key",
")",
"else",
"None",
"# otherwise grab from class method",
"if",
"value",
"==",
"None",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"'get_%s'",
"%",
"key",
")",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"getattr",
"(",
"self",
",",
"'get_%s'",
"%",
"key",
",",
"None",
")",
"else",
"None",
"if",
"key",
"in",
"defaults",
":",
"resolved_params",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"extra_context",
"[",
"key",
"]",
"=",
"value",
"if",
"extra_context",
":",
"try",
":",
"resolved_params",
"[",
"'extra_context'",
"]",
".",
"update",
"(",
"extra_context",
")",
"except",
"AttributeError",
":",
"resolved_params",
"[",
"'extra_context'",
"]",
"=",
"extra_context",
"return",
"resolved_params"
] |
Resolves view params with least ammount of resistance.
Firstly check for params on urls passed args, then on class init args or members,
and lastly on class get methods .
|
[
"Resolves",
"view",
"params",
"with",
"least",
"ammount",
"of",
"resistance",
".",
"Firstly",
"check",
"for",
"params",
"on",
"urls",
"passed",
"args",
"then",
"on",
"class",
"init",
"args",
"or",
"members",
"and",
"lastly",
"on",
"class",
"get",
"methods",
"."
] |
0fd621e15a7c11a2716a9554a2f820d6259818e5
|
https://github.com/praekelt/panya/blob/0fd621e15a7c11a2716a9554a2f820d6259818e5/panya/generic/views.py#L50-L85
|
244,121
|
praekelt/panya
|
panya/generic/views.py
|
GenericForm.handle_valid
|
def handle_valid(self, form=None, *args, **kwargs):
"""
Called after the form has validated.
"""
# Take a chance and try save a subclass of a ModelForm.
if hasattr(form, 'save'):
form.save()
# Also try and call handle_valid method of the form itself.
if hasattr(form, 'handle_valid'):
form.handle_valid(*args, **kwargs)
|
python
|
def handle_valid(self, form=None, *args, **kwargs):
"""
Called after the form has validated.
"""
# Take a chance and try save a subclass of a ModelForm.
if hasattr(form, 'save'):
form.save()
# Also try and call handle_valid method of the form itself.
if hasattr(form, 'handle_valid'):
form.handle_valid(*args, **kwargs)
|
[
"def",
"handle_valid",
"(",
"self",
",",
"form",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Take a chance and try save a subclass of a ModelForm.",
"if",
"hasattr",
"(",
"form",
",",
"'save'",
")",
":",
"form",
".",
"save",
"(",
")",
"# Also try and call handle_valid method of the form itself.",
"if",
"hasattr",
"(",
"form",
",",
"'handle_valid'",
")",
":",
"form",
".",
"handle_valid",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Called after the form has validated.
|
[
"Called",
"after",
"the",
"form",
"has",
"validated",
"."
] |
0fd621e15a7c11a2716a9554a2f820d6259818e5
|
https://github.com/praekelt/panya/blob/0fd621e15a7c11a2716a9554a2f820d6259818e5/panya/generic/views.py#L172-L181
|
244,122
|
lwcook/horsetail-matching
|
horsetailmatching/weightedsum.py
|
WeightedSum.evalMetric
|
def evalMetric(self, x, w1=None, w2=None):
'''Evaluates the weighted sum metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param float w1: value to weight the mean by
:param float w2: value to weight the std by
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
'''
if w1 is None:
w1 = self.w1
if w2 is None:
w2 = self.w2
if self.verbose:
print('----------')
print('At design: ' + str(x))
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose: print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
if self.verbose: print('Evaluating metric')
return self._evalWeightedSumMetric(q_samples, grad_samples)
|
python
|
def evalMetric(self, x, w1=None, w2=None):
'''Evaluates the weighted sum metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param float w1: value to weight the mean by
:param float w2: value to weight the std by
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
'''
if w1 is None:
w1 = self.w1
if w2 is None:
w2 = self.w2
if self.verbose:
print('----------')
print('At design: ' + str(x))
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose: print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
if self.verbose: print('Evaluating metric')
return self._evalWeightedSumMetric(q_samples, grad_samples)
|
[
"def",
"evalMetric",
"(",
"self",
",",
"x",
",",
"w1",
"=",
"None",
",",
"w2",
"=",
"None",
")",
":",
"if",
"w1",
"is",
"None",
":",
"w1",
"=",
"self",
".",
"w1",
"if",
"w2",
"is",
"None",
":",
"w2",
"=",
"self",
".",
"w2",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'----------'",
")",
"print",
"(",
"'At design: '",
"+",
"str",
"(",
"x",
")",
")",
"self",
".",
"_N_dv",
"=",
"len",
"(",
"_makeIter",
"(",
"x",
")",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Evaluating surrogate'",
")",
"if",
"self",
".",
"surrogate",
"is",
"None",
":",
"def",
"fqoi",
"(",
"u",
")",
":",
"return",
"self",
".",
"fqoi",
"(",
"x",
",",
"u",
")",
"def",
"fgrad",
"(",
"u",
")",
":",
"return",
"self",
".",
"jac",
"(",
"x",
",",
"u",
")",
"jac",
"=",
"self",
".",
"jac",
"else",
":",
"fqoi",
",",
"fgrad",
",",
"surr_jac",
"=",
"self",
".",
"_makeSurrogates",
"(",
"x",
")",
"jac",
"=",
"surr_jac",
"u_samples",
"=",
"self",
".",
"_getParameterSamples",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Evaluating quantity of interest at samples'",
")",
"q_samples",
",",
"grad_samples",
"=",
"self",
".",
"_evalSamples",
"(",
"u_samples",
",",
"fqoi",
",",
"fgrad",
",",
"jac",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Evaluating metric'",
")",
"return",
"self",
".",
"_evalWeightedSumMetric",
"(",
"q_samples",
",",
"grad_samples",
")"
] |
Evaluates the weighted sum metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param float w1: value to weight the mean by
:param float w2: value to weight the std by
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
|
[
"Evaluates",
"the",
"weighted",
"sum",
"metric",
"at",
"given",
"values",
"of",
"the",
"design",
"variables",
"."
] |
f3d5f8d01249debbca978f412ce4eae017458119
|
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/weightedsum.py#L96-L143
|
244,123
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.add_event
|
def add_event(self,
source,
reference,
event_title,
event_type,
method='',
description='',
bucket_list=[],
campaign='',
confidence='',
date=None):
"""
Adds an event. If the event name already exists, it will return that
event instead.
Args:
source: Source of the information
reference: A reference where more information can be found
event_title: The title of the event
event_type: The type of event. See your CRITs vocabulary.
method: The method for obtaining the event.
description: A text description of the event.
bucket_list: A list of bucket list items to add
campaign: An associated campaign
confidence: The campaign confidence
date: A datetime.datetime object of when the event occurred.
Returns:
A JSON event object or None if there was an error.
"""
# Check to see if the event already exists
events = self.get_events(event_title)
if events is not None:
if events['meta']['total_count'] == 1:
return events['objects'][0]
if events['meta']['total_count'] > 1:
log.error('Multiple events found while trying to add the event'
': {}'.format(event_title))
return None
# Now we can create the event
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'campaign': campaign,
'confidence': confidence,
'description': description,
'event_type': event_type,
'date': date,
'title': event_title,
'bucket_list': ','.join(bucket_list),
}
r = requests.post('{}/events/'.format(self.url), data=data,
verify=self.verify, proxies=self.proxies)
if r.status_code == 200:
log.debug('Event created: {}'.format(event_title))
json_obj = json.loads(r.text)
if 'id' not in json_obj:
log.error('Error adding event. id not returned.')
return None
return json_obj
else:
log.error('Event creation failed with status code: '
'{}'.format(r.status_code))
return None
|
python
|
def add_event(self,
source,
reference,
event_title,
event_type,
method='',
description='',
bucket_list=[],
campaign='',
confidence='',
date=None):
"""
Adds an event. If the event name already exists, it will return that
event instead.
Args:
source: Source of the information
reference: A reference where more information can be found
event_title: The title of the event
event_type: The type of event. See your CRITs vocabulary.
method: The method for obtaining the event.
description: A text description of the event.
bucket_list: A list of bucket list items to add
campaign: An associated campaign
confidence: The campaign confidence
date: A datetime.datetime object of when the event occurred.
Returns:
A JSON event object or None if there was an error.
"""
# Check to see if the event already exists
events = self.get_events(event_title)
if events is not None:
if events['meta']['total_count'] == 1:
return events['objects'][0]
if events['meta']['total_count'] > 1:
log.error('Multiple events found while trying to add the event'
': {}'.format(event_title))
return None
# Now we can create the event
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'campaign': campaign,
'confidence': confidence,
'description': description,
'event_type': event_type,
'date': date,
'title': event_title,
'bucket_list': ','.join(bucket_list),
}
r = requests.post('{}/events/'.format(self.url), data=data,
verify=self.verify, proxies=self.proxies)
if r.status_code == 200:
log.debug('Event created: {}'.format(event_title))
json_obj = json.loads(r.text)
if 'id' not in json_obj:
log.error('Error adding event. id not returned.')
return None
return json_obj
else:
log.error('Event creation failed with status code: '
'{}'.format(r.status_code))
return None
|
[
"def",
"add_event",
"(",
"self",
",",
"source",
",",
"reference",
",",
"event_title",
",",
"event_type",
",",
"method",
"=",
"''",
",",
"description",
"=",
"''",
",",
"bucket_list",
"=",
"[",
"]",
",",
"campaign",
"=",
"''",
",",
"confidence",
"=",
"''",
",",
"date",
"=",
"None",
")",
":",
"# Check to see if the event already exists",
"events",
"=",
"self",
".",
"get_events",
"(",
"event_title",
")",
"if",
"events",
"is",
"not",
"None",
":",
"if",
"events",
"[",
"'meta'",
"]",
"[",
"'total_count'",
"]",
"==",
"1",
":",
"return",
"events",
"[",
"'objects'",
"]",
"[",
"0",
"]",
"if",
"events",
"[",
"'meta'",
"]",
"[",
"'total_count'",
"]",
">",
"1",
":",
"log",
".",
"error",
"(",
"'Multiple events found while trying to add the event'",
"': {}'",
".",
"format",
"(",
"event_title",
")",
")",
"return",
"None",
"# Now we can create the event",
"data",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'source'",
":",
"source",
",",
"'reference'",
":",
"reference",
",",
"'method'",
":",
"method",
",",
"'campaign'",
":",
"campaign",
",",
"'confidence'",
":",
"confidence",
",",
"'description'",
":",
"description",
",",
"'event_type'",
":",
"event_type",
",",
"'date'",
":",
"date",
",",
"'title'",
":",
"event_title",
",",
"'bucket_list'",
":",
"','",
".",
"join",
"(",
"bucket_list",
")",
",",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"'{}/events/'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"data",
"=",
"data",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"log",
".",
"debug",
"(",
"'Event created: {}'",
".",
"format",
"(",
"event_title",
")",
")",
"json_obj",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"if",
"'id'",
"not",
"in",
"json_obj",
":",
"log",
".",
"error",
"(",
"'Error adding event. id not returned.'",
")",
"return",
"None",
"return",
"json_obj",
"else",
":",
"log",
".",
"error",
"(",
"'Event creation failed with status code: '",
"'{}'",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"return",
"None"
] |
Adds an event. If the event name already exists, it will return that
event instead.
Args:
source: Source of the information
reference: A reference where more information can be found
event_title: The title of the event
event_type: The type of event. See your CRITs vocabulary.
method: The method for obtaining the event.
description: A text description of the event.
bucket_list: A list of bucket list items to add
campaign: An associated campaign
confidence: The campaign confidence
date: A datetime.datetime object of when the event occurred.
Returns:
A JSON event object or None if there was an error.
|
[
"Adds",
"an",
"event",
".",
"If",
"the",
"event",
"name",
"already",
"exists",
"it",
"will",
"return",
"that",
"event",
"instead",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L117-L183
|
244,124
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.add_sample_file
|
def add_sample_file(self,
sample_path,
source,
reference,
method='',
file_format='raw',
file_password='',
sample_name='',
campaign='',
confidence='',
description='',
bucket_list=[]):
"""
Adds a file sample. For meta data only use add_sample_meta.
Args:
sample_path: The path on disk of the sample to upload
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the sample.
file_format: Must be raw, zip, or rar.
file_password: The password of a zip or rar archived sample
sample_name: Specify a filename for the sample rather than using
the name on disk
campaign: An associated campaign
confidence: The campaign confidence
description: A text description of the sample
bucket_list: A list of bucket list items to add
Returns:
A JSON sample object or None if there was an error.
"""
if os.path.isfile(sample_path):
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'filetype': file_format,
'upload_type': 'file',
'campaign': campaign,
'confidence': confidence,
'description': description,
'bucket_list': ','.join(bucket_list),
}
if sample_name != '':
data['filename'] = sample_name
with open(sample_path, 'rb') as fdata:
if file_password:
data['password'] = file_password
r = requests.post('{0}/samples/'.format(self.url),
data=data,
files={'filedata': fdata},
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
log.error('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
python
|
def add_sample_file(self,
sample_path,
source,
reference,
method='',
file_format='raw',
file_password='',
sample_name='',
campaign='',
confidence='',
description='',
bucket_list=[]):
"""
Adds a file sample. For meta data only use add_sample_meta.
Args:
sample_path: The path on disk of the sample to upload
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the sample.
file_format: Must be raw, zip, or rar.
file_password: The password of a zip or rar archived sample
sample_name: Specify a filename for the sample rather than using
the name on disk
campaign: An associated campaign
confidence: The campaign confidence
description: A text description of the sample
bucket_list: A list of bucket list items to add
Returns:
A JSON sample object or None if there was an error.
"""
if os.path.isfile(sample_path):
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'filetype': file_format,
'upload_type': 'file',
'campaign': campaign,
'confidence': confidence,
'description': description,
'bucket_list': ','.join(bucket_list),
}
if sample_name != '':
data['filename'] = sample_name
with open(sample_path, 'rb') as fdata:
if file_password:
data['password'] = file_password
r = requests.post('{0}/samples/'.format(self.url),
data=data,
files={'filedata': fdata},
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
log.error('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
[
"def",
"add_sample_file",
"(",
"self",
",",
"sample_path",
",",
"source",
",",
"reference",
",",
"method",
"=",
"''",
",",
"file_format",
"=",
"'raw'",
",",
"file_password",
"=",
"''",
",",
"sample_name",
"=",
"''",
",",
"campaign",
"=",
"''",
",",
"confidence",
"=",
"''",
",",
"description",
"=",
"''",
",",
"bucket_list",
"=",
"[",
"]",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"sample_path",
")",
":",
"data",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'source'",
":",
"source",
",",
"'reference'",
":",
"reference",
",",
"'method'",
":",
"method",
",",
"'filetype'",
":",
"file_format",
",",
"'upload_type'",
":",
"'file'",
",",
"'campaign'",
":",
"campaign",
",",
"'confidence'",
":",
"confidence",
",",
"'description'",
":",
"description",
",",
"'bucket_list'",
":",
"','",
".",
"join",
"(",
"bucket_list",
")",
",",
"}",
"if",
"sample_name",
"!=",
"''",
":",
"data",
"[",
"'filename'",
"]",
"=",
"sample_name",
"with",
"open",
"(",
"sample_path",
",",
"'rb'",
")",
"as",
"fdata",
":",
"if",
"file_password",
":",
"data",
"[",
"'password'",
"]",
"=",
"file_password",
"r",
"=",
"requests",
".",
"post",
"(",
"'{0}/samples/'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"data",
"=",
"data",
",",
"files",
"=",
"{",
"'filedata'",
":",
"fdata",
"}",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"result_data",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"return",
"result_data",
"else",
":",
"log",
".",
"error",
"(",
"'Error with status code {0} and message '",
"'{1}'",
".",
"format",
"(",
"r",
".",
"status_code",
",",
"r",
".",
"text",
")",
")",
"return",
"None"
] |
Adds a file sample. For meta data only use add_sample_meta.
Args:
sample_path: The path on disk of the sample to upload
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the sample.
file_format: Must be raw, zip, or rar.
file_password: The password of a zip or rar archived sample
sample_name: Specify a filename for the sample rather than using
the name on disk
campaign: An associated campaign
confidence: The campaign confidence
description: A text description of the sample
bucket_list: A list of bucket list items to add
Returns:
A JSON sample object or None if there was an error.
|
[
"Adds",
"a",
"file",
"sample",
".",
"For",
"meta",
"data",
"only",
"use",
"add_sample_meta",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L185-L246
|
244,125
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.add_sample_meta
|
def add_sample_meta(self,
source,
reference,
method='',
filename='',
md5='',
sha1='',
sha256='',
size='',
mimetype='',
campaign='',
confidence='',
description='',
bucket_list=[]):
"""
Adds a metadata sample. To add an actual file, use add_sample_file.
Args:
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the sample.
filename: The name of the file.
md5: An MD5 hash of the file.
sha1: SHA1 hash of the file.
sha256: SHA256 hash of the file.
size: size of the file.
mimetype: The mimetype of the file.
campaign: An associated campaign
confidence: The campaign confidence
bucket_list: A list of bucket list items to add
upload_type: Either 'file' or 'meta'
Returns:
A JSON sample object or None if there was an error.
"""
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'filename': filename,
'md5': md5,
'sha1': sha1,
'sha256': sha256,
'size': size,
'mimetype': mimetype,
'upload_type': 'meta',
'campaign': campaign,
'confidence': confidence,
'bucket_list': ','.join(bucket_list),
}
r = requests.post('{0}/samples/'.format(self.url),
data=data,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
log.error('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
python
|
def add_sample_meta(self,
source,
reference,
method='',
filename='',
md5='',
sha1='',
sha256='',
size='',
mimetype='',
campaign='',
confidence='',
description='',
bucket_list=[]):
"""
Adds a metadata sample. To add an actual file, use add_sample_file.
Args:
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the sample.
filename: The name of the file.
md5: An MD5 hash of the file.
sha1: SHA1 hash of the file.
sha256: SHA256 hash of the file.
size: size of the file.
mimetype: The mimetype of the file.
campaign: An associated campaign
confidence: The campaign confidence
bucket_list: A list of bucket list items to add
upload_type: Either 'file' or 'meta'
Returns:
A JSON sample object or None if there was an error.
"""
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'filename': filename,
'md5': md5,
'sha1': sha1,
'sha256': sha256,
'size': size,
'mimetype': mimetype,
'upload_type': 'meta',
'campaign': campaign,
'confidence': confidence,
'bucket_list': ','.join(bucket_list),
}
r = requests.post('{0}/samples/'.format(self.url),
data=data,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
log.error('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
[
"def",
"add_sample_meta",
"(",
"self",
",",
"source",
",",
"reference",
",",
"method",
"=",
"''",
",",
"filename",
"=",
"''",
",",
"md5",
"=",
"''",
",",
"sha1",
"=",
"''",
",",
"sha256",
"=",
"''",
",",
"size",
"=",
"''",
",",
"mimetype",
"=",
"''",
",",
"campaign",
"=",
"''",
",",
"confidence",
"=",
"''",
",",
"description",
"=",
"''",
",",
"bucket_list",
"=",
"[",
"]",
")",
":",
"data",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'source'",
":",
"source",
",",
"'reference'",
":",
"reference",
",",
"'method'",
":",
"method",
",",
"'filename'",
":",
"filename",
",",
"'md5'",
":",
"md5",
",",
"'sha1'",
":",
"sha1",
",",
"'sha256'",
":",
"sha256",
",",
"'size'",
":",
"size",
",",
"'mimetype'",
":",
"mimetype",
",",
"'upload_type'",
":",
"'meta'",
",",
"'campaign'",
":",
"campaign",
",",
"'confidence'",
":",
"confidence",
",",
"'bucket_list'",
":",
"','",
".",
"join",
"(",
"bucket_list",
")",
",",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"'{0}/samples/'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"data",
"=",
"data",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"result_data",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"return",
"result_data",
"else",
":",
"log",
".",
"error",
"(",
"'Error with status code {0} and message '",
"'{1}'",
".",
"format",
"(",
"r",
".",
"status_code",
",",
"r",
".",
"text",
")",
")",
"return",
"None"
] |
Adds a metadata sample. To add an actual file, use add_sample_file.
Args:
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the sample.
filename: The name of the file.
md5: An MD5 hash of the file.
sha1: SHA1 hash of the file.
sha256: SHA256 hash of the file.
size: size of the file.
mimetype: The mimetype of the file.
campaign: An associated campaign
confidence: The campaign confidence
bucket_list: A list of bucket list items to add
upload_type: Either 'file' or 'meta'
Returns:
A JSON sample object or None if there was an error.
|
[
"Adds",
"a",
"metadata",
"sample",
".",
"To",
"add",
"an",
"actual",
"file",
"use",
"add_sample_file",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L248-L309
|
244,126
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.add_email
|
def add_email(self,
email_path,
source,
reference,
method='',
upload_type='raw',
campaign='',
confidence='',
description='',
bucket_list=[],
password=''):
"""
Add an email object to CRITs. Only RAW, MSG, and EML are supported
currently.
Args:
email_path: The path on disk of the email.
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the email.
upload_type: 'raw', 'eml', or 'msg'
campaign: An associated campaign
confidence: The campaign confidence
description: A description of the email
bucket_list: A list of bucket list items to add
password: A password for a 'msg' type.
Returns:
A JSON email object from CRITs or None if there was an error.
"""
if not os.path.isfile(email_path):
log.error('{} is not a file'.format(email_path))
return None
with open(email_path, 'rb') as fdata:
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'upload_type': upload_type,
'campaign': campaign,
'confidence': confidence,
'bucket_list': bucket_list,
'description': description,
}
if password:
data['password'] = password
r = requests.post("{0}/emails/".format(self.url),
data=data,
files={'filedata': fdata},
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
print('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
python
|
def add_email(self,
email_path,
source,
reference,
method='',
upload_type='raw',
campaign='',
confidence='',
description='',
bucket_list=[],
password=''):
"""
Add an email object to CRITs. Only RAW, MSG, and EML are supported
currently.
Args:
email_path: The path on disk of the email.
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the email.
upload_type: 'raw', 'eml', or 'msg'
campaign: An associated campaign
confidence: The campaign confidence
description: A description of the email
bucket_list: A list of bucket list items to add
password: A password for a 'msg' type.
Returns:
A JSON email object from CRITs or None if there was an error.
"""
if not os.path.isfile(email_path):
log.error('{} is not a file'.format(email_path))
return None
with open(email_path, 'rb') as fdata:
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'upload_type': upload_type,
'campaign': campaign,
'confidence': confidence,
'bucket_list': bucket_list,
'description': description,
}
if password:
data['password'] = password
r = requests.post("{0}/emails/".format(self.url),
data=data,
files={'filedata': fdata},
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
print('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
[
"def",
"add_email",
"(",
"self",
",",
"email_path",
",",
"source",
",",
"reference",
",",
"method",
"=",
"''",
",",
"upload_type",
"=",
"'raw'",
",",
"campaign",
"=",
"''",
",",
"confidence",
"=",
"''",
",",
"description",
"=",
"''",
",",
"bucket_list",
"=",
"[",
"]",
",",
"password",
"=",
"''",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"email_path",
")",
":",
"log",
".",
"error",
"(",
"'{} is not a file'",
".",
"format",
"(",
"email_path",
")",
")",
"return",
"None",
"with",
"open",
"(",
"email_path",
",",
"'rb'",
")",
"as",
"fdata",
":",
"data",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'source'",
":",
"source",
",",
"'reference'",
":",
"reference",
",",
"'method'",
":",
"method",
",",
"'upload_type'",
":",
"upload_type",
",",
"'campaign'",
":",
"campaign",
",",
"'confidence'",
":",
"confidence",
",",
"'bucket_list'",
":",
"bucket_list",
",",
"'description'",
":",
"description",
",",
"}",
"if",
"password",
":",
"data",
"[",
"'password'",
"]",
"=",
"password",
"r",
"=",
"requests",
".",
"post",
"(",
"\"{0}/emails/\"",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"data",
"=",
"data",
",",
"files",
"=",
"{",
"'filedata'",
":",
"fdata",
"}",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"result_data",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"return",
"result_data",
"else",
":",
"print",
"(",
"'Error with status code {0} and message '",
"'{1}'",
".",
"format",
"(",
"r",
".",
"status_code",
",",
"r",
".",
"text",
")",
")",
"return",
"None"
] |
Add an email object to CRITs. Only RAW, MSG, and EML are supported
currently.
Args:
email_path: The path on disk of the email.
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the email.
upload_type: 'raw', 'eml', or 'msg'
campaign: An associated campaign
confidence: The campaign confidence
description: A description of the email
bucket_list: A list of bucket list items to add
password: A password for a 'msg' type.
Returns:
A JSON email object from CRITs or None if there was an error.
|
[
"Add",
"an",
"email",
"object",
"to",
"CRITs",
".",
"Only",
"RAW",
"MSG",
"and",
"EML",
"are",
"supported",
"currently",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L311-L369
|
244,127
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.add_backdoor
|
def add_backdoor(self,
backdoor_name,
source,
reference,
method='',
aliases=[],
version='',
campaign='',
confidence='',
description='',
bucket_list=[]):
"""
Add a backdoor object to CRITs.
Args:
backdoor_name: The primary name of the backdoor
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the backdoor information.
aliases: List of aliases for the backdoor.
version: Version
campaign: An associated campaign
confidence: The campaign confidence
description: A description of the email
bucket_list: A list of bucket list items to add
"""
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'name': backdoor_name,
'aliases': ','.join(aliases),
'version': version,
'campaign': campaign,
'confidence': confidence,
'bucket_list': bucket_list,
'description': description,
}
r = requests.post('{0}/backdoors/'.format(self.url),
data=data,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
log.error('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
python
|
def add_backdoor(self,
backdoor_name,
source,
reference,
method='',
aliases=[],
version='',
campaign='',
confidence='',
description='',
bucket_list=[]):
"""
Add a backdoor object to CRITs.
Args:
backdoor_name: The primary name of the backdoor
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the backdoor information.
aliases: List of aliases for the backdoor.
version: Version
campaign: An associated campaign
confidence: The campaign confidence
description: A description of the email
bucket_list: A list of bucket list items to add
"""
data = {
'api_key': self.api_key,
'username': self.username,
'source': source,
'reference': reference,
'method': method,
'name': backdoor_name,
'aliases': ','.join(aliases),
'version': version,
'campaign': campaign,
'confidence': confidence,
'bucket_list': bucket_list,
'description': description,
}
r = requests.post('{0}/backdoors/'.format(self.url),
data=data,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
return result_data
else:
log.error('Error with status code {0} and message '
'{1}'.format(r.status_code, r.text))
return None
|
[
"def",
"add_backdoor",
"(",
"self",
",",
"backdoor_name",
",",
"source",
",",
"reference",
",",
"method",
"=",
"''",
",",
"aliases",
"=",
"[",
"]",
",",
"version",
"=",
"''",
",",
"campaign",
"=",
"''",
",",
"confidence",
"=",
"''",
",",
"description",
"=",
"''",
",",
"bucket_list",
"=",
"[",
"]",
")",
":",
"data",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'source'",
":",
"source",
",",
"'reference'",
":",
"reference",
",",
"'method'",
":",
"method",
",",
"'name'",
":",
"backdoor_name",
",",
"'aliases'",
":",
"','",
".",
"join",
"(",
"aliases",
")",
",",
"'version'",
":",
"version",
",",
"'campaign'",
":",
"campaign",
",",
"'confidence'",
":",
"confidence",
",",
"'bucket_list'",
":",
"bucket_list",
",",
"'description'",
":",
"description",
",",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"'{0}/backdoors/'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"data",
"=",
"data",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"result_data",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"return",
"result_data",
"else",
":",
"log",
".",
"error",
"(",
"'Error with status code {0} and message '",
"'{1}'",
".",
"format",
"(",
"r",
".",
"status_code",
",",
"r",
".",
"text",
")",
")",
"return",
"None"
] |
Add a backdoor object to CRITs.
Args:
backdoor_name: The primary name of the backdoor
source: Source of the information
reference: A reference where more information can be found
method: The method for obtaining the backdoor information.
aliases: List of aliases for the backdoor.
version: Version
campaign: An associated campaign
confidence: The campaign confidence
description: A description of the email
bucket_list: A list of bucket list items to add
|
[
"Add",
"a",
"backdoor",
"object",
"to",
"CRITs",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L371-L421
|
244,128
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.get_events
|
def get_events(self, event_title, regex=False):
"""
Search for events with the provided title
Args:
event_title: The title of the event
Returns:
An event JSON object returned from the server with the following:
{
"meta":{
"limit": 20, "next": null, "offset": 0,
"previous": null, "total_count": 3
},
"objects": [{}, {}, etc]
}
or None if an error occurred.
"""
regex_val = 0
if regex:
regex_val = 1
r = requests.get('{0}/events/?api_key={1}&username={2}&c-title='
'{3}®ex={4}'.format(self.url, self.api_key,
self.username, event_title,
regex_val), verify=self.verify)
if r.status_code == 200:
json_obj = json.loads(r.text)
return json_obj
else:
log.error('Non-200 status code from get_event: '
'{}'.format(r.status_code))
return None
|
python
|
def get_events(self, event_title, regex=False):
"""
Search for events with the provided title
Args:
event_title: The title of the event
Returns:
An event JSON object returned from the server with the following:
{
"meta":{
"limit": 20, "next": null, "offset": 0,
"previous": null, "total_count": 3
},
"objects": [{}, {}, etc]
}
or None if an error occurred.
"""
regex_val = 0
if regex:
regex_val = 1
r = requests.get('{0}/events/?api_key={1}&username={2}&c-title='
'{3}®ex={4}'.format(self.url, self.api_key,
self.username, event_title,
regex_val), verify=self.verify)
if r.status_code == 200:
json_obj = json.loads(r.text)
return json_obj
else:
log.error('Non-200 status code from get_event: '
'{}'.format(r.status_code))
return None
|
[
"def",
"get_events",
"(",
"self",
",",
"event_title",
",",
"regex",
"=",
"False",
")",
":",
"regex_val",
"=",
"0",
"if",
"regex",
":",
"regex_val",
"=",
"1",
"r",
"=",
"requests",
".",
"get",
"(",
"'{0}/events/?api_key={1}&username={2}&c-title='",
"'{3}®ex={4}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"self",
".",
"api_key",
",",
"self",
".",
"username",
",",
"event_title",
",",
"regex_val",
")",
",",
"verify",
"=",
"self",
".",
"verify",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"json_obj",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"return",
"json_obj",
"else",
":",
"log",
".",
"error",
"(",
"'Non-200 status code from get_event: '",
"'{}'",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"return",
"None"
] |
Search for events with the provided title
Args:
event_title: The title of the event
Returns:
An event JSON object returned from the server with the following:
{
"meta":{
"limit": 20, "next": null, "offset": 0,
"previous": null, "total_count": 3
},
"objects": [{}, {}, etc]
}
or None if an error occurred.
|
[
"Search",
"for",
"events",
"with",
"the",
"provided",
"title"
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L471-L501
|
244,129
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.get_samples
|
def get_samples(self, md5='', sha1='', sha256=''):
"""
Searches for a sample in CRITs. Currently only hashes allowed.
Args:
md5: md5sum
sha1: sha1sum
sha256: sha256sum
Returns:
JSON response or None if not found
"""
params = {'api_key': self.api_key, 'username': self.username}
if md5:
params['c-md5'] = md5
if sha1:
params['c-sha1'] = sha1
if sha256:
params['c-sha256'] = sha256
r = requests.get('{0}/samples/'.format(self.url),
params=params,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
if 'meta' in result_data:
if 'total_count' in result_data['meta']:
if result_data['meta']['total_count'] > 0:
return result_data
else:
log.error('Non-200 status code: {}'.format(r.status_code))
return None
|
python
|
def get_samples(self, md5='', sha1='', sha256=''):
"""
Searches for a sample in CRITs. Currently only hashes allowed.
Args:
md5: md5sum
sha1: sha1sum
sha256: sha256sum
Returns:
JSON response or None if not found
"""
params = {'api_key': self.api_key, 'username': self.username}
if md5:
params['c-md5'] = md5
if sha1:
params['c-sha1'] = sha1
if sha256:
params['c-sha256'] = sha256
r = requests.get('{0}/samples/'.format(self.url),
params=params,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
if 'meta' in result_data:
if 'total_count' in result_data['meta']:
if result_data['meta']['total_count'] > 0:
return result_data
else:
log.error('Non-200 status code: {}'.format(r.status_code))
return None
|
[
"def",
"get_samples",
"(",
"self",
",",
"md5",
"=",
"''",
",",
"sha1",
"=",
"''",
",",
"sha256",
"=",
"''",
")",
":",
"params",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
"}",
"if",
"md5",
":",
"params",
"[",
"'c-md5'",
"]",
"=",
"md5",
"if",
"sha1",
":",
"params",
"[",
"'c-sha1'",
"]",
"=",
"sha1",
"if",
"sha256",
":",
"params",
"[",
"'c-sha256'",
"]",
"=",
"sha256",
"r",
"=",
"requests",
".",
"get",
"(",
"'{0}/samples/'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"result_data",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"if",
"'meta'",
"in",
"result_data",
":",
"if",
"'total_count'",
"in",
"result_data",
"[",
"'meta'",
"]",
":",
"if",
"result_data",
"[",
"'meta'",
"]",
"[",
"'total_count'",
"]",
">",
"0",
":",
"return",
"result_data",
"else",
":",
"log",
".",
"error",
"(",
"'Non-200 status code: {}'",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"return",
"None"
] |
Searches for a sample in CRITs. Currently only hashes allowed.
Args:
md5: md5sum
sha1: sha1sum
sha256: sha256sum
Returns:
JSON response or None if not found
|
[
"Searches",
"for",
"a",
"sample",
"in",
"CRITs",
".",
"Currently",
"only",
"hashes",
"allowed",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L503-L533
|
244,130
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.get_backdoor
|
def get_backdoor(self, name, version=''):
"""
Searches for the backdoor based on name and version.
Args:
name: The name of the backdoor. This can be an alias.
version: The version.
Returns:
Returns a JSON object contain one or more backdoor results or
None if not found.
"""
params = {}
params['or'] = 1
params['c-name'] = name
params['c-aliases__in'] = name
r = requests.get('{0}/backdoors/'.format(self.url),
params=params,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
if 'meta' not in result_data:
return None
if 'total_count' not in result_data['meta']:
return None
if result_data['meta']['total_count'] <= 0:
return None
if 'objects' not in result_data:
return None
for backdoor in result_data['objects']:
if 'version' in backdoor:
if backdoor['version'] == version:
return backdoor
else:
log.error('Non-200 status code: {}'.format(r.status_code))
return None
|
python
|
def get_backdoor(self, name, version=''):
"""
Searches for the backdoor based on name and version.
Args:
name: The name of the backdoor. This can be an alias.
version: The version.
Returns:
Returns a JSON object contain one or more backdoor results or
None if not found.
"""
params = {}
params['or'] = 1
params['c-name'] = name
params['c-aliases__in'] = name
r = requests.get('{0}/backdoors/'.format(self.url),
params=params,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
if 'meta' not in result_data:
return None
if 'total_count' not in result_data['meta']:
return None
if result_data['meta']['total_count'] <= 0:
return None
if 'objects' not in result_data:
return None
for backdoor in result_data['objects']:
if 'version' in backdoor:
if backdoor['version'] == version:
return backdoor
else:
log.error('Non-200 status code: {}'.format(r.status_code))
return None
|
[
"def",
"get_backdoor",
"(",
"self",
",",
"name",
",",
"version",
"=",
"''",
")",
":",
"params",
"=",
"{",
"}",
"params",
"[",
"'or'",
"]",
"=",
"1",
"params",
"[",
"'c-name'",
"]",
"=",
"name",
"params",
"[",
"'c-aliases__in'",
"]",
"=",
"name",
"r",
"=",
"requests",
".",
"get",
"(",
"'{0}/backdoors/'",
".",
"format",
"(",
"self",
".",
"url",
")",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"result_data",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"if",
"'meta'",
"not",
"in",
"result_data",
":",
"return",
"None",
"if",
"'total_count'",
"not",
"in",
"result_data",
"[",
"'meta'",
"]",
":",
"return",
"None",
"if",
"result_data",
"[",
"'meta'",
"]",
"[",
"'total_count'",
"]",
"<=",
"0",
":",
"return",
"None",
"if",
"'objects'",
"not",
"in",
"result_data",
":",
"return",
"None",
"for",
"backdoor",
"in",
"result_data",
"[",
"'objects'",
"]",
":",
"if",
"'version'",
"in",
"backdoor",
":",
"if",
"backdoor",
"[",
"'version'",
"]",
"==",
"version",
":",
"return",
"backdoor",
"else",
":",
"log",
".",
"error",
"(",
"'Non-200 status code: {}'",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"return",
"None"
] |
Searches for the backdoor based on name and version.
Args:
name: The name of the backdoor. This can be an alias.
version: The version.
Returns:
Returns a JSON object contain one or more backdoor results or
None if not found.
|
[
"Searches",
"for",
"the",
"backdoor",
"based",
"on",
"name",
"and",
"version",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L563-L598
|
244,131
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.has_relationship
|
def has_relationship(self, left_id, left_type, right_id, right_type,
rel_type='Related To'):
"""
Checks if the two objects are related
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationships type ("Related To", etc)
Returns:
True or False if the relationship exists or not.
"""
data = self.get_object(left_id, left_type)
if not data:
raise CRITsOperationalError('Crits Object not found with id {}'
'and type {}'.format(left_id,
left_type))
if 'relationships' not in data:
return False
for relationship in data['relationships']:
if relationship['relationship'] != rel_type:
continue
if relationship['value'] != right_id:
continue
if relationship['type'] != right_type:
continue
return True
return False
|
python
|
def has_relationship(self, left_id, left_type, right_id, right_type,
rel_type='Related To'):
"""
Checks if the two objects are related
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationships type ("Related To", etc)
Returns:
True or False if the relationship exists or not.
"""
data = self.get_object(left_id, left_type)
if not data:
raise CRITsOperationalError('Crits Object not found with id {}'
'and type {}'.format(left_id,
left_type))
if 'relationships' not in data:
return False
for relationship in data['relationships']:
if relationship['relationship'] != rel_type:
continue
if relationship['value'] != right_id:
continue
if relationship['type'] != right_type:
continue
return True
return False
|
[
"def",
"has_relationship",
"(",
"self",
",",
"left_id",
",",
"left_type",
",",
"right_id",
",",
"right_type",
",",
"rel_type",
"=",
"'Related To'",
")",
":",
"data",
"=",
"self",
".",
"get_object",
"(",
"left_id",
",",
"left_type",
")",
"if",
"not",
"data",
":",
"raise",
"CRITsOperationalError",
"(",
"'Crits Object not found with id {}'",
"'and type {}'",
".",
"format",
"(",
"left_id",
",",
"left_type",
")",
")",
"if",
"'relationships'",
"not",
"in",
"data",
":",
"return",
"False",
"for",
"relationship",
"in",
"data",
"[",
"'relationships'",
"]",
":",
"if",
"relationship",
"[",
"'relationship'",
"]",
"!=",
"rel_type",
":",
"continue",
"if",
"relationship",
"[",
"'value'",
"]",
"!=",
"right_id",
":",
"continue",
"if",
"relationship",
"[",
"'type'",
"]",
"!=",
"right_type",
":",
"continue",
"return",
"True",
"return",
"False"
] |
Checks if the two objects are related
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationships type ("Related To", etc)
Returns:
True or False if the relationship exists or not.
|
[
"Checks",
"if",
"the",
"two",
"objects",
"are",
"related"
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L600-L629
|
244,132
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI.forge_relationship
|
def forge_relationship(self, left_id, left_type, right_id, right_type,
rel_type='Related To', rel_date=None,
rel_confidence='high', rel_reason=''):
"""
Forges a relationship between two TLOs.
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationships type ("Related To", etc)
rel_date: datetime.datetime object for the date of the
relationship. If left blank, it will be datetime.datetime.now()
rel_confidence: The relationship confidence (high, medium, low)
rel_reason: Reason for the relationship.
Returns:
True if the relationship was created. False otherwise.
"""
if not rel_date:
rel_date = datetime.datetime.now()
type_trans = self._type_translation(left_type)
submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id)
params = {
'api_key': self.api_key,
'username': self.username,
}
data = {
'action': 'forge_relationship',
'right_type': right_type,
'right_id': right_id,
'rel_type': rel_type,
'rel_date': rel_date,
'rel_confidence': rel_confidence,
'rel_reason': rel_reason
}
r = requests.patch(submit_url, params=params, data=data,
proxies=self.proxies, verify=self.verify)
if r.status_code == 200:
log.debug('Relationship built successfully: {0} <-> '
'{1}'.format(left_id, right_id))
return True
else:
log.error('Error with status code {0} and message {1} between '
'these indicators: {2} <-> '
'{3}'.format(r.status_code, r.text, left_id, right_id))
return False
|
python
|
def forge_relationship(self, left_id, left_type, right_id, right_type,
rel_type='Related To', rel_date=None,
rel_confidence='high', rel_reason=''):
"""
Forges a relationship between two TLOs.
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationships type ("Related To", etc)
rel_date: datetime.datetime object for the date of the
relationship. If left blank, it will be datetime.datetime.now()
rel_confidence: The relationship confidence (high, medium, low)
rel_reason: Reason for the relationship.
Returns:
True if the relationship was created. False otherwise.
"""
if not rel_date:
rel_date = datetime.datetime.now()
type_trans = self._type_translation(left_type)
submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id)
params = {
'api_key': self.api_key,
'username': self.username,
}
data = {
'action': 'forge_relationship',
'right_type': right_type,
'right_id': right_id,
'rel_type': rel_type,
'rel_date': rel_date,
'rel_confidence': rel_confidence,
'rel_reason': rel_reason
}
r = requests.patch(submit_url, params=params, data=data,
proxies=self.proxies, verify=self.verify)
if r.status_code == 200:
log.debug('Relationship built successfully: {0} <-> '
'{1}'.format(left_id, right_id))
return True
else:
log.error('Error with status code {0} and message {1} between '
'these indicators: {2} <-> '
'{3}'.format(r.status_code, r.text, left_id, right_id))
return False
|
[
"def",
"forge_relationship",
"(",
"self",
",",
"left_id",
",",
"left_type",
",",
"right_id",
",",
"right_type",
",",
"rel_type",
"=",
"'Related To'",
",",
"rel_date",
"=",
"None",
",",
"rel_confidence",
"=",
"'high'",
",",
"rel_reason",
"=",
"''",
")",
":",
"if",
"not",
"rel_date",
":",
"rel_date",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"type_trans",
"=",
"self",
".",
"_type_translation",
"(",
"left_type",
")",
"submit_url",
"=",
"'{}/{}/{}/'",
".",
"format",
"(",
"self",
".",
"url",
",",
"type_trans",
",",
"left_id",
")",
"params",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'username'",
":",
"self",
".",
"username",
",",
"}",
"data",
"=",
"{",
"'action'",
":",
"'forge_relationship'",
",",
"'right_type'",
":",
"right_type",
",",
"'right_id'",
":",
"right_id",
",",
"'rel_type'",
":",
"rel_type",
",",
"'rel_date'",
":",
"rel_date",
",",
"'rel_confidence'",
":",
"rel_confidence",
",",
"'rel_reason'",
":",
"rel_reason",
"}",
"r",
"=",
"requests",
".",
"patch",
"(",
"submit_url",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"verify",
"=",
"self",
".",
"verify",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"log",
".",
"debug",
"(",
"'Relationship built successfully: {0} <-> '",
"'{1}'",
".",
"format",
"(",
"left_id",
",",
"right_id",
")",
")",
"return",
"True",
"else",
":",
"log",
".",
"error",
"(",
"'Error with status code {0} and message {1} between '",
"'these indicators: {2} <-> '",
"'{3}'",
".",
"format",
"(",
"r",
".",
"status_code",
",",
"r",
".",
"text",
",",
"left_id",
",",
"right_id",
")",
")",
"return",
"False"
] |
Forges a relationship between two TLOs.
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationships type ("Related To", etc)
rel_date: datetime.datetime object for the date of the
relationship. If left blank, it will be datetime.datetime.now()
rel_confidence: The relationship confidence (high, medium, low)
rel_reason: Reason for the relationship.
Returns:
True if the relationship was created. False otherwise.
|
[
"Forges",
"a",
"relationship",
"between",
"two",
"TLOs",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L631-L680
|
244,133
|
IntegralDefense/critsapi
|
critsapi/critsapi.py
|
CRITsAPI._type_translation
|
def _type_translation(self, str_type):
"""
Internal method to translate the named CRITs TLO type to a URL
specific string.
"""
if str_type == 'Indicator':
return 'indicators'
if str_type == 'Domain':
return 'domains'
if str_type == 'IP':
return 'ips'
if str_type == 'Sample':
return 'samples'
if str_type == 'Event':
return 'events'
if str_type == 'Actor':
return 'actors'
if str_type == 'Email':
return 'emails'
if str_type == 'Backdoor':
return 'backdoors'
raise CRITsInvalidTypeError('Invalid object type specified: '
'{}'.format(str_type))
|
python
|
def _type_translation(self, str_type):
"""
Internal method to translate the named CRITs TLO type to a URL
specific string.
"""
if str_type == 'Indicator':
return 'indicators'
if str_type == 'Domain':
return 'domains'
if str_type == 'IP':
return 'ips'
if str_type == 'Sample':
return 'samples'
if str_type == 'Event':
return 'events'
if str_type == 'Actor':
return 'actors'
if str_type == 'Email':
return 'emails'
if str_type == 'Backdoor':
return 'backdoors'
raise CRITsInvalidTypeError('Invalid object type specified: '
'{}'.format(str_type))
|
[
"def",
"_type_translation",
"(",
"self",
",",
"str_type",
")",
":",
"if",
"str_type",
"==",
"'Indicator'",
":",
"return",
"'indicators'",
"if",
"str_type",
"==",
"'Domain'",
":",
"return",
"'domains'",
"if",
"str_type",
"==",
"'IP'",
":",
"return",
"'ips'",
"if",
"str_type",
"==",
"'Sample'",
":",
"return",
"'samples'",
"if",
"str_type",
"==",
"'Event'",
":",
"return",
"'events'",
"if",
"str_type",
"==",
"'Actor'",
":",
"return",
"'actors'",
"if",
"str_type",
"==",
"'Email'",
":",
"return",
"'emails'",
"if",
"str_type",
"==",
"'Backdoor'",
":",
"return",
"'backdoors'",
"raise",
"CRITsInvalidTypeError",
"(",
"'Invalid object type specified: '",
"'{}'",
".",
"format",
"(",
"str_type",
")",
")"
] |
Internal method to translate the named CRITs TLO type to a URL
specific string.
|
[
"Internal",
"method",
"to",
"translate",
"the",
"named",
"CRITs",
"TLO",
"type",
"to",
"a",
"URL",
"specific",
"string",
"."
] |
e770bd81e124eaaeb5f1134ba95f4a35ff345c5a
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L763-L786
|
244,134
|
radjkarl/fancyTools
|
DUMP/intersection.py
|
lineSeqmentsDoIntersect
|
def lineSeqmentsDoIntersect(line1, line2):
"""
Return True if line segment line1 intersects line segment line2 and
line1 and line2 are not parallel.
"""
(x1, y1), (x2, y2) = line1
(u1, v1), (u2, v2) = line2
(a, b), (c, d) = (x2 - x1, u1 - u2), (y2 - y1, v1 - v2)
e, f = u1 - x1, v1 - y1
denom = float(a * d - b * c)
if _near(denom, 0):
# parallel
return False
else:
t = old_div((e * d - b * f), denom)
s = old_div((a * f - e * c), denom)
# When 0<=t<=1 and 0<=s<=1 the point of intersection occurs within the
# line segments
return 0 <= t <= 1 and 0 <= s <= 1
|
python
|
def lineSeqmentsDoIntersect(line1, line2):
"""
Return True if line segment line1 intersects line segment line2 and
line1 and line2 are not parallel.
"""
(x1, y1), (x2, y2) = line1
(u1, v1), (u2, v2) = line2
(a, b), (c, d) = (x2 - x1, u1 - u2), (y2 - y1, v1 - v2)
e, f = u1 - x1, v1 - y1
denom = float(a * d - b * c)
if _near(denom, 0):
# parallel
return False
else:
t = old_div((e * d - b * f), denom)
s = old_div((a * f - e * c), denom)
# When 0<=t<=1 and 0<=s<=1 the point of intersection occurs within the
# line segments
return 0 <= t <= 1 and 0 <= s <= 1
|
[
"def",
"lineSeqmentsDoIntersect",
"(",
"line1",
",",
"line2",
")",
":",
"(",
"x1",
",",
"y1",
")",
",",
"(",
"x2",
",",
"y2",
")",
"=",
"line1",
"(",
"u1",
",",
"v1",
")",
",",
"(",
"u2",
",",
"v2",
")",
"=",
"line2",
"(",
"a",
",",
"b",
")",
",",
"(",
"c",
",",
"d",
")",
"=",
"(",
"x2",
"-",
"x1",
",",
"u1",
"-",
"u2",
")",
",",
"(",
"y2",
"-",
"y1",
",",
"v1",
"-",
"v2",
")",
"e",
",",
"f",
"=",
"u1",
"-",
"x1",
",",
"v1",
"-",
"y1",
"denom",
"=",
"float",
"(",
"a",
"*",
"d",
"-",
"b",
"*",
"c",
")",
"if",
"_near",
"(",
"denom",
",",
"0",
")",
":",
"# parallel\r",
"return",
"False",
"else",
":",
"t",
"=",
"old_div",
"(",
"(",
"e",
"*",
"d",
"-",
"b",
"*",
"f",
")",
",",
"denom",
")",
"s",
"=",
"old_div",
"(",
"(",
"a",
"*",
"f",
"-",
"e",
"*",
"c",
")",
",",
"denom",
")",
"# When 0<=t<=1 and 0<=s<=1 the point of intersection occurs within the\r",
"# line segments\r",
"return",
"0",
"<=",
"t",
"<=",
"1",
"and",
"0",
"<=",
"s",
"<=",
"1"
] |
Return True if line segment line1 intersects line segment line2 and
line1 and line2 are not parallel.
|
[
"Return",
"True",
"if",
"line",
"segment",
"line1",
"intersects",
"line",
"segment",
"line2",
"and",
"line1",
"and",
"line2",
"are",
"not",
"parallel",
"."
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/DUMP/intersection.py#L41-L59
|
244,135
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
mf
|
def mf(pred, props=None, value_fn=None, props_on_match=False, priority=None):
"""
Matcher factory.
"""
if isinstance(pred, BaseMatcher):
ret = pred if props_on_match else pred.props
if isinstance(pred, basestring) or \
type(pred).__name__ == 'SRE_Pattern':
ret = RegexMatcher(pred, props=props, value_fn=value_fn)
if isinstance(pred, set):
return OverlayMatcher(pred, props=props, value_fn=value_fn)
if isinstance(pred, list):
deps = [p for p in pred if isinstance(p, BaseMatcher)]
ret = ListMatcher([mf(p, props_on_match=True) for p in pred],
props=props, value_fn=value_fn,
dependencies=deps)
if priority is not None:
ret.priority = priority
return ret
|
python
|
def mf(pred, props=None, value_fn=None, props_on_match=False, priority=None):
"""
Matcher factory.
"""
if isinstance(pred, BaseMatcher):
ret = pred if props_on_match else pred.props
if isinstance(pred, basestring) or \
type(pred).__name__ == 'SRE_Pattern':
ret = RegexMatcher(pred, props=props, value_fn=value_fn)
if isinstance(pred, set):
return OverlayMatcher(pred, props=props, value_fn=value_fn)
if isinstance(pred, list):
deps = [p for p in pred if isinstance(p, BaseMatcher)]
ret = ListMatcher([mf(p, props_on_match=True) for p in pred],
props=props, value_fn=value_fn,
dependencies=deps)
if priority is not None:
ret.priority = priority
return ret
|
[
"def",
"mf",
"(",
"pred",
",",
"props",
"=",
"None",
",",
"value_fn",
"=",
"None",
",",
"props_on_match",
"=",
"False",
",",
"priority",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"pred",
",",
"BaseMatcher",
")",
":",
"ret",
"=",
"pred",
"if",
"props_on_match",
"else",
"pred",
".",
"props",
"if",
"isinstance",
"(",
"pred",
",",
"basestring",
")",
"or",
"type",
"(",
"pred",
")",
".",
"__name__",
"==",
"'SRE_Pattern'",
":",
"ret",
"=",
"RegexMatcher",
"(",
"pred",
",",
"props",
"=",
"props",
",",
"value_fn",
"=",
"value_fn",
")",
"if",
"isinstance",
"(",
"pred",
",",
"set",
")",
":",
"return",
"OverlayMatcher",
"(",
"pred",
",",
"props",
"=",
"props",
",",
"value_fn",
"=",
"value_fn",
")",
"if",
"isinstance",
"(",
"pred",
",",
"list",
")",
":",
"deps",
"=",
"[",
"p",
"for",
"p",
"in",
"pred",
"if",
"isinstance",
"(",
"p",
",",
"BaseMatcher",
")",
"]",
"ret",
"=",
"ListMatcher",
"(",
"[",
"mf",
"(",
"p",
",",
"props_on_match",
"=",
"True",
")",
"for",
"p",
"in",
"pred",
"]",
",",
"props",
"=",
"props",
",",
"value_fn",
"=",
"value_fn",
",",
"dependencies",
"=",
"deps",
")",
"if",
"priority",
"is",
"not",
"None",
":",
"ret",
".",
"priority",
"=",
"priority",
"return",
"ret"
] |
Matcher factory.
|
[
"Matcher",
"factory",
"."
] |
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L255-L279
|
244,136
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
ListMatcher._merge_ovls
|
def _merge_ovls(self, ovls):
"""
Merge ovls and also setup the value and props.
"""
ret = reduce(lambda x, y: x.merge(y), ovls)
ret.value = self.value(ovls=ovls)
ret.set_props(self.props)
return ret
|
python
|
def _merge_ovls(self, ovls):
"""
Merge ovls and also setup the value and props.
"""
ret = reduce(lambda x, y: x.merge(y), ovls)
ret.value = self.value(ovls=ovls)
ret.set_props(self.props)
return ret
|
[
"def",
"_merge_ovls",
"(",
"self",
",",
"ovls",
")",
":",
"ret",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
".",
"merge",
"(",
"y",
")",
",",
"ovls",
")",
"ret",
".",
"value",
"=",
"self",
".",
"value",
"(",
"ovls",
"=",
"ovls",
")",
"ret",
".",
"set_props",
"(",
"self",
".",
"props",
")",
"return",
"ret"
] |
Merge ovls and also setup the value and props.
|
[
"Merge",
"ovls",
"and",
"also",
"setup",
"the",
"value",
"and",
"props",
"."
] |
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L145-L153
|
244,137
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
ListMatcher._fit_overlay_lists
|
def _fit_overlay_lists(self, text, start, matchers, **kw):
"""
Return a list of overlays that start at start.
"""
if matchers:
for o in matchers[0].fit_overlays(text, start):
for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):
yield [o] + rest
else:
yield []
|
python
|
def _fit_overlay_lists(self, text, start, matchers, **kw):
"""
Return a list of overlays that start at start.
"""
if matchers:
for o in matchers[0].fit_overlays(text, start):
for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):
yield [o] + rest
else:
yield []
|
[
"def",
"_fit_overlay_lists",
"(",
"self",
",",
"text",
",",
"start",
",",
"matchers",
",",
"*",
"*",
"kw",
")",
":",
"if",
"matchers",
":",
"for",
"o",
"in",
"matchers",
"[",
"0",
"]",
".",
"fit_overlays",
"(",
"text",
",",
"start",
")",
":",
"for",
"rest",
"in",
"self",
".",
"_fit_overlay_lists",
"(",
"text",
",",
"o",
".",
"end",
",",
"matchers",
"[",
"1",
":",
"]",
")",
":",
"yield",
"[",
"o",
"]",
"+",
"rest",
"else",
":",
"yield",
"[",
"]"
] |
Return a list of overlays that start at start.
|
[
"Return",
"a",
"list",
"of",
"overlays",
"that",
"start",
"at",
"start",
"."
] |
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L155-L166
|
244,138
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
ListMatcher.offset_overlays
|
def offset_overlays(self, text, offset=0, run_deps=True, **kw):
"""
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
"""
if run_deps and self.dependencies:
text.overlay(self.dependencies)
for ovlf in self.matchers[0].offset_overlays(text,
goffset=offset,
**kw):
for ovll in self._fit_overlay_lists(text, ovlf.end,
self.matchers[1:]):
yield self._merge_ovls([ovlf] + ovll)
|
python
|
def offset_overlays(self, text, offset=0, run_deps=True, **kw):
"""
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
"""
if run_deps and self.dependencies:
text.overlay(self.dependencies)
for ovlf in self.matchers[0].offset_overlays(text,
goffset=offset,
**kw):
for ovll in self._fit_overlay_lists(text, ovlf.end,
self.matchers[1:]):
yield self._merge_ovls([ovlf] + ovll)
|
[
"def",
"offset_overlays",
"(",
"self",
",",
"text",
",",
"offset",
"=",
"0",
",",
"run_deps",
"=",
"True",
",",
"*",
"*",
"kw",
")",
":",
"if",
"run_deps",
"and",
"self",
".",
"dependencies",
":",
"text",
".",
"overlay",
"(",
"self",
".",
"dependencies",
")",
"for",
"ovlf",
"in",
"self",
".",
"matchers",
"[",
"0",
"]",
".",
"offset_overlays",
"(",
"text",
",",
"goffset",
"=",
"offset",
",",
"*",
"*",
"kw",
")",
":",
"for",
"ovll",
"in",
"self",
".",
"_fit_overlay_lists",
"(",
"text",
",",
"ovlf",
".",
"end",
",",
"self",
".",
"matchers",
"[",
"1",
":",
"]",
")",
":",
"yield",
"self",
".",
"_merge_ovls",
"(",
"[",
"ovlf",
"]",
"+",
"ovll",
")"
] |
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
|
[
"The",
"heavy",
"lifting",
"is",
"done",
"by",
"fit_overlays",
".",
"Override",
"just",
"that",
"for",
"alternatie",
"implementation",
"."
] |
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L168-L182
|
244,139
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
MatcherMatcher._maybe_run_matchers
|
def _maybe_run_matchers(self, text, run_matchers):
"""
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
"""
if run_matchers is True or \
(run_matchers is not False and text not in self._overlayed_already):
text.overlay(self.matchers)
self._overlayed_already.append(text)
|
python
|
def _maybe_run_matchers(self, text, run_matchers):
"""
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
"""
if run_matchers is True or \
(run_matchers is not False and text not in self._overlayed_already):
text.overlay(self.matchers)
self._overlayed_already.append(text)
|
[
"def",
"_maybe_run_matchers",
"(",
"self",
",",
"text",
",",
"run_matchers",
")",
":",
"if",
"run_matchers",
"is",
"True",
"or",
"(",
"run_matchers",
"is",
"not",
"False",
"and",
"text",
"not",
"in",
"self",
".",
"_overlayed_already",
")",
":",
"text",
".",
"overlay",
"(",
"self",
".",
"matchers",
")",
"self",
".",
"_overlayed_already",
".",
"append",
"(",
"text",
")"
] |
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
|
[
"OverlayedText",
"should",
"be",
"smart",
"enough",
"to",
"not",
"run",
"twice",
"the",
"same",
"matchers",
"but",
"this",
"is",
"an",
"extra",
"handle",
"of",
"control",
"over",
"that",
"."
] |
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L218-L227
|
244,140
|
nefarioustim/parker
|
parker/parsedpage.py
|
ParsedPage.get_nodes_by_selector
|
def get_nodes_by_selector(self, selector, not_selector=None):
"""Return a collection of filtered nodes.
Filtered based on the @selector and @not_selector parameters.
"""
nodes = self.parsed(selector)
if not_selector is not None:
nodes = nodes.not_(not_selector)
return nodes
|
python
|
def get_nodes_by_selector(self, selector, not_selector=None):
"""Return a collection of filtered nodes.
Filtered based on the @selector and @not_selector parameters.
"""
nodes = self.parsed(selector)
if not_selector is not None:
nodes = nodes.not_(not_selector)
return nodes
|
[
"def",
"get_nodes_by_selector",
"(",
"self",
",",
"selector",
",",
"not_selector",
"=",
"None",
")",
":",
"nodes",
"=",
"self",
".",
"parsed",
"(",
"selector",
")",
"if",
"not_selector",
"is",
"not",
"None",
":",
"nodes",
"=",
"nodes",
".",
"not_",
"(",
"not_selector",
")",
"return",
"nodes"
] |
Return a collection of filtered nodes.
Filtered based on the @selector and @not_selector parameters.
|
[
"Return",
"a",
"collection",
"of",
"filtered",
"nodes",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/parsedpage.py#L21-L31
|
244,141
|
nefarioustim/parker
|
parker/parsedpage.py
|
ParsedPage.get_text_from_node
|
def get_text_from_node(self, node, regex=None, group=1):
"""Get text from node and filter if necessary."""
text = self._get_stripped_text_from_node(node)
if regex is not None:
text = self._filter_by_regex(regex, text, group)
return text
|
python
|
def get_text_from_node(self, node, regex=None, group=1):
"""Get text from node and filter if necessary."""
text = self._get_stripped_text_from_node(node)
if regex is not None:
text = self._filter_by_regex(regex, text, group)
return text
|
[
"def",
"get_text_from_node",
"(",
"self",
",",
"node",
",",
"regex",
"=",
"None",
",",
"group",
"=",
"1",
")",
":",
"text",
"=",
"self",
".",
"_get_stripped_text_from_node",
"(",
"node",
")",
"if",
"regex",
"is",
"not",
"None",
":",
"text",
"=",
"self",
".",
"_filter_by_regex",
"(",
"regex",
",",
"text",
",",
"group",
")",
"return",
"text"
] |
Get text from node and filter if necessary.
|
[
"Get",
"text",
"from",
"node",
"and",
"filter",
"if",
"necessary",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/parsedpage.py#L33-L40
|
244,142
|
nefarioustim/parker
|
parker/parsedpage.py
|
ParsedPage._get_stripped_text_from_node
|
def _get_stripped_text_from_node(self, node):
"""Return the stripped text content of a node."""
return (
node.text_content()
.replace(u"\u00A0", " ")
.replace("\t", "")
.replace("\n", "")
.strip()
)
|
python
|
def _get_stripped_text_from_node(self, node):
"""Return the stripped text content of a node."""
return (
node.text_content()
.replace(u"\u00A0", " ")
.replace("\t", "")
.replace("\n", "")
.strip()
)
|
[
"def",
"_get_stripped_text_from_node",
"(",
"self",
",",
"node",
")",
":",
"return",
"(",
"node",
".",
"text_content",
"(",
")",
".",
"replace",
"(",
"u\"\\u00A0\"",
",",
"\" \"",
")",
".",
"replace",
"(",
"\"\\t\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
")"
] |
Return the stripped text content of a node.
|
[
"Return",
"the",
"stripped",
"text",
"content",
"of",
"a",
"node",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/parsedpage.py#L55-L63
|
244,143
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
wind
|
def wind(direction: Number,
speed: Number,
gust: Number,
vardir: typing.List[Number] = None,
unit: str = 'kt') -> str:
"""
Format wind details into a spoken word string
"""
unit = SPOKEN_UNITS.get(unit, unit)
val = translate.wind(direction, speed, gust, vardir, unit,
cardinals=False, spoken=True)
return 'Winds ' + (val or 'unknown')
|
python
|
def wind(direction: Number,
speed: Number,
gust: Number,
vardir: typing.List[Number] = None,
unit: str = 'kt') -> str:
"""
Format wind details into a spoken word string
"""
unit = SPOKEN_UNITS.get(unit, unit)
val = translate.wind(direction, speed, gust, vardir, unit,
cardinals=False, spoken=True)
return 'Winds ' + (val or 'unknown')
|
[
"def",
"wind",
"(",
"direction",
":",
"Number",
",",
"speed",
":",
"Number",
",",
"gust",
":",
"Number",
",",
"vardir",
":",
"typing",
".",
"List",
"[",
"Number",
"]",
"=",
"None",
",",
"unit",
":",
"str",
"=",
"'kt'",
")",
"->",
"str",
":",
"unit",
"=",
"SPOKEN_UNITS",
".",
"get",
"(",
"unit",
",",
"unit",
")",
"val",
"=",
"translate",
".",
"wind",
"(",
"direction",
",",
"speed",
",",
"gust",
",",
"vardir",
",",
"unit",
",",
"cardinals",
"=",
"False",
",",
"spoken",
"=",
"True",
")",
"return",
"'Winds '",
"+",
"(",
"val",
"or",
"'unknown'",
")"
] |
Format wind details into a spoken word string
|
[
"Format",
"wind",
"details",
"into",
"a",
"spoken",
"word",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L20-L31
|
244,144
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
temperature
|
def temperature(header: str, temp: Number, unit: str = 'C') -> str:
"""
Format temperature details into a spoken word string
"""
if not (temp and temp.value):
return header + ' unknown'
if unit in SPOKEN_UNITS:
unit = SPOKEN_UNITS[unit]
use_s = '' if temp.spoken in ('one', 'minus one') else 's'
return ' '.join((header, temp.spoken, 'degree' + use_s, unit))
|
python
|
def temperature(header: str, temp: Number, unit: str = 'C') -> str:
"""
Format temperature details into a spoken word string
"""
if not (temp and temp.value):
return header + ' unknown'
if unit in SPOKEN_UNITS:
unit = SPOKEN_UNITS[unit]
use_s = '' if temp.spoken in ('one', 'minus one') else 's'
return ' '.join((header, temp.spoken, 'degree' + use_s, unit))
|
[
"def",
"temperature",
"(",
"header",
":",
"str",
",",
"temp",
":",
"Number",
",",
"unit",
":",
"str",
"=",
"'C'",
")",
"->",
"str",
":",
"if",
"not",
"(",
"temp",
"and",
"temp",
".",
"value",
")",
":",
"return",
"header",
"+",
"' unknown'",
"if",
"unit",
"in",
"SPOKEN_UNITS",
":",
"unit",
"=",
"SPOKEN_UNITS",
"[",
"unit",
"]",
"use_s",
"=",
"''",
"if",
"temp",
".",
"spoken",
"in",
"(",
"'one'",
",",
"'minus one'",
")",
"else",
"'s'",
"return",
"' '",
".",
"join",
"(",
"(",
"header",
",",
"temp",
".",
"spoken",
",",
"'degree'",
"+",
"use_s",
",",
"unit",
")",
")"
] |
Format temperature details into a spoken word string
|
[
"Format",
"temperature",
"details",
"into",
"a",
"spoken",
"word",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L34-L43
|
244,145
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
visibility
|
def visibility(vis: Number, unit: str = 'm') -> str:
"""
Format visibility details into a spoken word string
"""
if not vis:
return 'Visibility unknown'
if vis.value is None or '/' in vis.repr:
ret_vis = vis.spoken
else:
ret_vis = translate.visibility(vis, unit=unit)
if unit == 'm':
unit = 'km'
ret_vis = ret_vis[:ret_vis.find(' (')].lower().replace(unit, '').strip()
ret_vis = core.spoken_number(core.remove_leading_zeros(ret_vis))
ret = 'Visibility ' + ret_vis
if unit in SPOKEN_UNITS:
if '/' in vis.repr and 'half' not in ret:
ret += ' of a'
ret += ' ' + SPOKEN_UNITS[unit]
if not (('one half' in ret and ' and ' not in ret) or 'of a' in ret):
ret += 's'
else:
ret += unit
return ret
|
python
|
def visibility(vis: Number, unit: str = 'm') -> str:
"""
Format visibility details into a spoken word string
"""
if not vis:
return 'Visibility unknown'
if vis.value is None or '/' in vis.repr:
ret_vis = vis.spoken
else:
ret_vis = translate.visibility(vis, unit=unit)
if unit == 'm':
unit = 'km'
ret_vis = ret_vis[:ret_vis.find(' (')].lower().replace(unit, '').strip()
ret_vis = core.spoken_number(core.remove_leading_zeros(ret_vis))
ret = 'Visibility ' + ret_vis
if unit in SPOKEN_UNITS:
if '/' in vis.repr and 'half' not in ret:
ret += ' of a'
ret += ' ' + SPOKEN_UNITS[unit]
if not (('one half' in ret and ' and ' not in ret) or 'of a' in ret):
ret += 's'
else:
ret += unit
return ret
|
[
"def",
"visibility",
"(",
"vis",
":",
"Number",
",",
"unit",
":",
"str",
"=",
"'m'",
")",
"->",
"str",
":",
"if",
"not",
"vis",
":",
"return",
"'Visibility unknown'",
"if",
"vis",
".",
"value",
"is",
"None",
"or",
"'/'",
"in",
"vis",
".",
"repr",
":",
"ret_vis",
"=",
"vis",
".",
"spoken",
"else",
":",
"ret_vis",
"=",
"translate",
".",
"visibility",
"(",
"vis",
",",
"unit",
"=",
"unit",
")",
"if",
"unit",
"==",
"'m'",
":",
"unit",
"=",
"'km'",
"ret_vis",
"=",
"ret_vis",
"[",
":",
"ret_vis",
".",
"find",
"(",
"' ('",
")",
"]",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"unit",
",",
"''",
")",
".",
"strip",
"(",
")",
"ret_vis",
"=",
"core",
".",
"spoken_number",
"(",
"core",
".",
"remove_leading_zeros",
"(",
"ret_vis",
")",
")",
"ret",
"=",
"'Visibility '",
"+",
"ret_vis",
"if",
"unit",
"in",
"SPOKEN_UNITS",
":",
"if",
"'/'",
"in",
"vis",
".",
"repr",
"and",
"'half'",
"not",
"in",
"ret",
":",
"ret",
"+=",
"' of a'",
"ret",
"+=",
"' '",
"+",
"SPOKEN_UNITS",
"[",
"unit",
"]",
"if",
"not",
"(",
"(",
"'one half'",
"in",
"ret",
"and",
"' and '",
"not",
"in",
"ret",
")",
"or",
"'of a'",
"in",
"ret",
")",
":",
"ret",
"+=",
"'s'",
"else",
":",
"ret",
"+=",
"unit",
"return",
"ret"
] |
Format visibility details into a spoken word string
|
[
"Format",
"visibility",
"details",
"into",
"a",
"spoken",
"word",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L46-L69
|
244,146
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
altimeter
|
def altimeter(alt: Number, unit: str = 'inHg') -> str:
"""
Format altimeter details into a spoken word string
"""
ret = 'Altimeter '
if not alt:
ret += 'unknown'
elif unit == 'inHg':
ret += core.spoken_number(alt.repr[:2]) + ' point ' + core.spoken_number(alt.repr[2:])
elif unit == 'hPa':
ret += core.spoken_number(alt.repr)
return ret
|
python
|
def altimeter(alt: Number, unit: str = 'inHg') -> str:
"""
Format altimeter details into a spoken word string
"""
ret = 'Altimeter '
if not alt:
ret += 'unknown'
elif unit == 'inHg':
ret += core.spoken_number(alt.repr[:2]) + ' point ' + core.spoken_number(alt.repr[2:])
elif unit == 'hPa':
ret += core.spoken_number(alt.repr)
return ret
|
[
"def",
"altimeter",
"(",
"alt",
":",
"Number",
",",
"unit",
":",
"str",
"=",
"'inHg'",
")",
"->",
"str",
":",
"ret",
"=",
"'Altimeter '",
"if",
"not",
"alt",
":",
"ret",
"+=",
"'unknown'",
"elif",
"unit",
"==",
"'inHg'",
":",
"ret",
"+=",
"core",
".",
"spoken_number",
"(",
"alt",
".",
"repr",
"[",
":",
"2",
"]",
")",
"+",
"' point '",
"+",
"core",
".",
"spoken_number",
"(",
"alt",
".",
"repr",
"[",
"2",
":",
"]",
")",
"elif",
"unit",
"==",
"'hPa'",
":",
"ret",
"+=",
"core",
".",
"spoken_number",
"(",
"alt",
".",
"repr",
")",
"return",
"ret"
] |
Format altimeter details into a spoken word string
|
[
"Format",
"altimeter",
"details",
"into",
"a",
"spoken",
"word",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L72-L83
|
244,147
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
other
|
def other(wxcodes: typing.List[str]) -> str:
"""
Format wx codes into a spoken word string
"""
ret = []
for code in wxcodes:
item = translate.wxcode(code)
if item.startswith('Vicinity'):
item = item.lstrip('Vicinity ') + ' in the Vicinity'
ret.append(item)
return '. '.join(ret)
|
python
|
def other(wxcodes: typing.List[str]) -> str:
"""
Format wx codes into a spoken word string
"""
ret = []
for code in wxcodes:
item = translate.wxcode(code)
if item.startswith('Vicinity'):
item = item.lstrip('Vicinity ') + ' in the Vicinity'
ret.append(item)
return '. '.join(ret)
|
[
"def",
"other",
"(",
"wxcodes",
":",
"typing",
".",
"List",
"[",
"str",
"]",
")",
"->",
"str",
":",
"ret",
"=",
"[",
"]",
"for",
"code",
"in",
"wxcodes",
":",
"item",
"=",
"translate",
".",
"wxcode",
"(",
"code",
")",
"if",
"item",
".",
"startswith",
"(",
"'Vicinity'",
")",
":",
"item",
"=",
"item",
".",
"lstrip",
"(",
"'Vicinity '",
")",
"+",
"' in the Vicinity'",
"ret",
".",
"append",
"(",
"item",
")",
"return",
"'. '",
".",
"join",
"(",
"ret",
")"
] |
Format wx codes into a spoken word string
|
[
"Format",
"wx",
"codes",
"into",
"a",
"spoken",
"word",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L86-L96
|
244,148
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
type_and_times
|
def type_and_times(type_: str, start: Timestamp, end: Timestamp, probability: Number = None) -> str:
"""
Format line type and times into the beginning of a spoken line string
"""
if not type_:
return ''
if type_ == 'BECMG':
return f"At {start.dt.hour or 'midnight'} zulu becoming"
ret = f"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,"
if probability and probability.value:
ret += f" there's a {probability.value}% chance for"
if type_ == 'INTER':
ret += ' intermittent'
elif type_ == 'TEMPO':
ret += ' temporary'
return ret
|
python
|
def type_and_times(type_: str, start: Timestamp, end: Timestamp, probability: Number = None) -> str:
"""
Format line type and times into the beginning of a spoken line string
"""
if not type_:
return ''
if type_ == 'BECMG':
return f"At {start.dt.hour or 'midnight'} zulu becoming"
ret = f"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,"
if probability and probability.value:
ret += f" there's a {probability.value}% chance for"
if type_ == 'INTER':
ret += ' intermittent'
elif type_ == 'TEMPO':
ret += ' temporary'
return ret
|
[
"def",
"type_and_times",
"(",
"type_",
":",
"str",
",",
"start",
":",
"Timestamp",
",",
"end",
":",
"Timestamp",
",",
"probability",
":",
"Number",
"=",
"None",
")",
"->",
"str",
":",
"if",
"not",
"type_",
":",
"return",
"''",
"if",
"type_",
"==",
"'BECMG'",
":",
"return",
"f\"At {start.dt.hour or 'midnight'} zulu becoming\"",
"ret",
"=",
"f\"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,\"",
"if",
"probability",
"and",
"probability",
".",
"value",
":",
"ret",
"+=",
"f\" there's a {probability.value}% chance for\"",
"if",
"type_",
"==",
"'INTER'",
":",
"ret",
"+=",
"' intermittent'",
"elif",
"type_",
"==",
"'TEMPO'",
":",
"ret",
"+=",
"' temporary'",
"return",
"ret"
] |
Format line type and times into the beginning of a spoken line string
|
[
"Format",
"line",
"type",
"and",
"times",
"into",
"the",
"beginning",
"of",
"a",
"spoken",
"line",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L99-L114
|
244,149
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
wind_shear
|
def wind_shear(shear: str, unit_alt: str = 'ft', unit_wind: str = 'kt') -> str:
"""
Format wind shear string into a spoken word string
"""
unit_alt = SPOKEN_UNITS.get(unit_alt, unit_alt)
unit_wind = SPOKEN_UNITS.get(unit_wind, unit_wind)
return translate.wind_shear(shear, unit_alt, unit_wind, spoken=True) or 'Wind shear unknown'
|
python
|
def wind_shear(shear: str, unit_alt: str = 'ft', unit_wind: str = 'kt') -> str:
"""
Format wind shear string into a spoken word string
"""
unit_alt = SPOKEN_UNITS.get(unit_alt, unit_alt)
unit_wind = SPOKEN_UNITS.get(unit_wind, unit_wind)
return translate.wind_shear(shear, unit_alt, unit_wind, spoken=True) or 'Wind shear unknown'
|
[
"def",
"wind_shear",
"(",
"shear",
":",
"str",
",",
"unit_alt",
":",
"str",
"=",
"'ft'",
",",
"unit_wind",
":",
"str",
"=",
"'kt'",
")",
"->",
"str",
":",
"unit_alt",
"=",
"SPOKEN_UNITS",
".",
"get",
"(",
"unit_alt",
",",
"unit_alt",
")",
"unit_wind",
"=",
"SPOKEN_UNITS",
".",
"get",
"(",
"unit_wind",
",",
"unit_wind",
")",
"return",
"translate",
".",
"wind_shear",
"(",
"shear",
",",
"unit_alt",
",",
"unit_wind",
",",
"spoken",
"=",
"True",
")",
"or",
"'Wind shear unknown'"
] |
Format wind shear string into a spoken word string
|
[
"Format",
"wind",
"shear",
"string",
"into",
"a",
"spoken",
"word",
"string"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L117-L123
|
244,150
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
metar
|
def metar(data: MetarData, units: Units) -> str:
"""
Convert MetarData into a string for text-to-speech
"""
speech = []
if data.wind_direction and data.wind_speed:
speech.append(wind(data.wind_direction, data.wind_speed,
data.wind_gust, data.wind_variable_direction,
units.wind_speed))
if data.visibility:
speech.append(visibility(data.visibility, units.visibility))
if data.temperature:
speech.append(temperature('Temperature', data.temperature, units.temperature))
if data.dewpoint:
speech.append(temperature('Dew point', data.dewpoint, units.temperature))
if data.altimeter:
speech.append(altimeter(data.altimeter, units.altimeter))
if data.other:
speech.append(other(data.other))
speech.append(translate.clouds(data.clouds,
units.altitude).replace(' - Reported AGL', ''))
return ('. '.join([l for l in speech if l])).replace(',', '.')
|
python
|
def metar(data: MetarData, units: Units) -> str:
"""
Convert MetarData into a string for text-to-speech
"""
speech = []
if data.wind_direction and data.wind_speed:
speech.append(wind(data.wind_direction, data.wind_speed,
data.wind_gust, data.wind_variable_direction,
units.wind_speed))
if data.visibility:
speech.append(visibility(data.visibility, units.visibility))
if data.temperature:
speech.append(temperature('Temperature', data.temperature, units.temperature))
if data.dewpoint:
speech.append(temperature('Dew point', data.dewpoint, units.temperature))
if data.altimeter:
speech.append(altimeter(data.altimeter, units.altimeter))
if data.other:
speech.append(other(data.other))
speech.append(translate.clouds(data.clouds,
units.altitude).replace(' - Reported AGL', ''))
return ('. '.join([l for l in speech if l])).replace(',', '.')
|
[
"def",
"metar",
"(",
"data",
":",
"MetarData",
",",
"units",
":",
"Units",
")",
"->",
"str",
":",
"speech",
"=",
"[",
"]",
"if",
"data",
".",
"wind_direction",
"and",
"data",
".",
"wind_speed",
":",
"speech",
".",
"append",
"(",
"wind",
"(",
"data",
".",
"wind_direction",
",",
"data",
".",
"wind_speed",
",",
"data",
".",
"wind_gust",
",",
"data",
".",
"wind_variable_direction",
",",
"units",
".",
"wind_speed",
")",
")",
"if",
"data",
".",
"visibility",
":",
"speech",
".",
"append",
"(",
"visibility",
"(",
"data",
".",
"visibility",
",",
"units",
".",
"visibility",
")",
")",
"if",
"data",
".",
"temperature",
":",
"speech",
".",
"append",
"(",
"temperature",
"(",
"'Temperature'",
",",
"data",
".",
"temperature",
",",
"units",
".",
"temperature",
")",
")",
"if",
"data",
".",
"dewpoint",
":",
"speech",
".",
"append",
"(",
"temperature",
"(",
"'Dew point'",
",",
"data",
".",
"dewpoint",
",",
"units",
".",
"temperature",
")",
")",
"if",
"data",
".",
"altimeter",
":",
"speech",
".",
"append",
"(",
"altimeter",
"(",
"data",
".",
"altimeter",
",",
"units",
".",
"altimeter",
")",
")",
"if",
"data",
".",
"other",
":",
"speech",
".",
"append",
"(",
"other",
"(",
"data",
".",
"other",
")",
")",
"speech",
".",
"append",
"(",
"translate",
".",
"clouds",
"(",
"data",
".",
"clouds",
",",
"units",
".",
"altitude",
")",
".",
"replace",
"(",
"' - Reported AGL'",
",",
"''",
")",
")",
"return",
"(",
"'. '",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"speech",
"if",
"l",
"]",
")",
")",
".",
"replace",
"(",
"','",
",",
"'.'",
")"
] |
Convert MetarData into a string for text-to-speech
|
[
"Convert",
"MetarData",
"into",
"a",
"string",
"for",
"text",
"-",
"to",
"-",
"speech"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L126-L147
|
244,151
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
taf_line
|
def taf_line(line: TafLineData, units: Units) -> str:
"""
Convert TafLineData into a string for text-to-speech
"""
speech = []
start = type_and_times(line.type, line.start_time, line.end_time, line.probability)
if line.wind_direction and line.wind_speed:
speech.append(wind(line.wind_direction, line.wind_speed,
line.wind_gust, unit=units.wind_speed))
if line.wind_shear:
speech.append(wind_shear(line.wind_shear, units.altimeter, units.wind_speed))
if line.visibility:
speech.append(visibility(line.visibility, units.visibility))
if line.altimeter:
speech.append(altimeter(line.altimeter, units.altimeter))
if line.other:
speech.append(other(line.other))
speech.append(translate.clouds(line.clouds,
units.altitude).replace(' - Reported AGL', ''))
if line.turbulance:
speech.append(translate.turb_ice(line.turbulance, units.altitude))
if line.icing:
speech.append(translate.turb_ice(line.icing, units.altitude))
return start + ' ' + ('. '.join([l for l in speech if l])).replace(',', '.')
|
python
|
def taf_line(line: TafLineData, units: Units) -> str:
"""
Convert TafLineData into a string for text-to-speech
"""
speech = []
start = type_and_times(line.type, line.start_time, line.end_time, line.probability)
if line.wind_direction and line.wind_speed:
speech.append(wind(line.wind_direction, line.wind_speed,
line.wind_gust, unit=units.wind_speed))
if line.wind_shear:
speech.append(wind_shear(line.wind_shear, units.altimeter, units.wind_speed))
if line.visibility:
speech.append(visibility(line.visibility, units.visibility))
if line.altimeter:
speech.append(altimeter(line.altimeter, units.altimeter))
if line.other:
speech.append(other(line.other))
speech.append(translate.clouds(line.clouds,
units.altitude).replace(' - Reported AGL', ''))
if line.turbulance:
speech.append(translate.turb_ice(line.turbulance, units.altitude))
if line.icing:
speech.append(translate.turb_ice(line.icing, units.altitude))
return start + ' ' + ('. '.join([l for l in speech if l])).replace(',', '.')
|
[
"def",
"taf_line",
"(",
"line",
":",
"TafLineData",
",",
"units",
":",
"Units",
")",
"->",
"str",
":",
"speech",
"=",
"[",
"]",
"start",
"=",
"type_and_times",
"(",
"line",
".",
"type",
",",
"line",
".",
"start_time",
",",
"line",
".",
"end_time",
",",
"line",
".",
"probability",
")",
"if",
"line",
".",
"wind_direction",
"and",
"line",
".",
"wind_speed",
":",
"speech",
".",
"append",
"(",
"wind",
"(",
"line",
".",
"wind_direction",
",",
"line",
".",
"wind_speed",
",",
"line",
".",
"wind_gust",
",",
"unit",
"=",
"units",
".",
"wind_speed",
")",
")",
"if",
"line",
".",
"wind_shear",
":",
"speech",
".",
"append",
"(",
"wind_shear",
"(",
"line",
".",
"wind_shear",
",",
"units",
".",
"altimeter",
",",
"units",
".",
"wind_speed",
")",
")",
"if",
"line",
".",
"visibility",
":",
"speech",
".",
"append",
"(",
"visibility",
"(",
"line",
".",
"visibility",
",",
"units",
".",
"visibility",
")",
")",
"if",
"line",
".",
"altimeter",
":",
"speech",
".",
"append",
"(",
"altimeter",
"(",
"line",
".",
"altimeter",
",",
"units",
".",
"altimeter",
")",
")",
"if",
"line",
".",
"other",
":",
"speech",
".",
"append",
"(",
"other",
"(",
"line",
".",
"other",
")",
")",
"speech",
".",
"append",
"(",
"translate",
".",
"clouds",
"(",
"line",
".",
"clouds",
",",
"units",
".",
"altitude",
")",
".",
"replace",
"(",
"' - Reported AGL'",
",",
"''",
")",
")",
"if",
"line",
".",
"turbulance",
":",
"speech",
".",
"append",
"(",
"translate",
".",
"turb_ice",
"(",
"line",
".",
"turbulance",
",",
"units",
".",
"altitude",
")",
")",
"if",
"line",
".",
"icing",
":",
"speech",
".",
"append",
"(",
"translate",
".",
"turb_ice",
"(",
"line",
".",
"icing",
",",
"units",
".",
"altitude",
")",
")",
"return",
"start",
"+",
"' '",
"+",
"(",
"'. '",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"speech",
"if",
"l",
"]",
")",
")",
".",
"replace",
"(",
"','",
",",
"'.'",
")"
] |
Convert TafLineData into a string for text-to-speech
|
[
"Convert",
"TafLineData",
"into",
"a",
"string",
"for",
"text",
"-",
"to",
"-",
"speech"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L150-L173
|
244,152
|
etcher-be/emiz
|
emiz/avwx/speech.py
|
taf
|
def taf(data: TafData, units: Units) -> str:
"""
Convert TafData into a string for text-to-speech
"""
try:
month = data.start_time.dt.strftime(r'%B')
day = ordinal(data.start_time.dt.day)
ret = f"Starting on {month} {day} - "
except AttributeError:
ret = ''
return ret + '. '.join([taf_line(line, units) for line in data.forecast])
|
python
|
def taf(data: TafData, units: Units) -> str:
"""
Convert TafData into a string for text-to-speech
"""
try:
month = data.start_time.dt.strftime(r'%B')
day = ordinal(data.start_time.dt.day)
ret = f"Starting on {month} {day} - "
except AttributeError:
ret = ''
return ret + '. '.join([taf_line(line, units) for line in data.forecast])
|
[
"def",
"taf",
"(",
"data",
":",
"TafData",
",",
"units",
":",
"Units",
")",
"->",
"str",
":",
"try",
":",
"month",
"=",
"data",
".",
"start_time",
".",
"dt",
".",
"strftime",
"(",
"r'%B'",
")",
"day",
"=",
"ordinal",
"(",
"data",
".",
"start_time",
".",
"dt",
".",
"day",
")",
"ret",
"=",
"f\"Starting on {month} {day} - \"",
"except",
"AttributeError",
":",
"ret",
"=",
"''",
"return",
"ret",
"+",
"'. '",
".",
"join",
"(",
"[",
"taf_line",
"(",
"line",
",",
"units",
")",
"for",
"line",
"in",
"data",
".",
"forecast",
"]",
")"
] |
Convert TafData into a string for text-to-speech
|
[
"Convert",
"TafData",
"into",
"a",
"string",
"for",
"text",
"-",
"to",
"-",
"speech"
] |
1c3e32711921d7e600e85558ffe5d337956372de
|
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L176-L186
|
244,153
|
fogcitymarathoner/s3_mysql_backup
|
s3_mysql_backup/scripts/get_bucket.py
|
get_bucket
|
def get_bucket():
"""
Get listing of S3 Bucket
"""
args = parser.parse_args()
bucket = s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name)
for b in bucket.list():
print(''.join([i if ord(i) < 128 else ' ' for i in b.name]))
|
python
|
def get_bucket():
"""
Get listing of S3 Bucket
"""
args = parser.parse_args()
bucket = s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name)
for b in bucket.list():
print(''.join([i if ord(i) < 128 else ' ' for i in b.name]))
|
[
"def",
"get_bucket",
"(",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"bucket",
"=",
"s3_bucket",
"(",
"args",
".",
"aws_access_key_id",
",",
"args",
".",
"aws_secret_access_key",
",",
"args",
".",
"bucket_name",
")",
"for",
"b",
"in",
"bucket",
".",
"list",
"(",
")",
":",
"print",
"(",
"''",
".",
"join",
"(",
"[",
"i",
"if",
"ord",
"(",
"i",
")",
"<",
"128",
"else",
"' '",
"for",
"i",
"in",
"b",
".",
"name",
"]",
")",
")"
] |
Get listing of S3 Bucket
|
[
"Get",
"listing",
"of",
"S3",
"Bucket"
] |
8a0fb3e51a7b873eb4287d4954548a0dbab0e734
|
https://github.com/fogcitymarathoner/s3_mysql_backup/blob/8a0fb3e51a7b873eb4287d4954548a0dbab0e734/s3_mysql_backup/scripts/get_bucket.py#L13-L21
|
244,154
|
collectiveacuity/labPack
|
labpack/mapping/data.py
|
walk_data
|
def walk_data(input_data):
''' a generator function for retrieving data in a nested dictionary
:param input_data: dictionary or list with nested data
:return: string with dot_path, object with value of endpoint
'''
def _walk_dict(input_dict, path_to_root):
if not path_to_root:
yield '.', input_dict
for key, value in input_dict.items():
key_path = '%s.%s' % (path_to_root, key)
type_name = value.__class__.__name__
yield key_path, value
if type_name == 'dict':
for dot_path, value in _walk_dict(value, key_path):
yield dot_path, value
elif type_name == 'list':
for dot_path, value in _walk_list(value, key_path):
yield dot_path, value
def _walk_list(input_list, path_to_root):
for i in range(len(input_list)):
item_path = '%s[%s]' % (path_to_root, i)
type_name = input_list[i].__class__.__name__
yield item_path, input_list[i]
if type_name == 'dict':
for dot_path, value in _walk_dict(input_list[i], item_path):
yield dot_path, value
elif type_name == 'list':
for dot_path, value in _walk_list(input_list[i], item_path):
yield dot_path, value
if isinstance(input_data, dict):
for dot_path, value in _walk_dict(input_data, ''):
yield dot_path, value
elif isinstance(input_data, list):
for dot_path, value in _walk_list(input_data, ''):
yield dot_path, value
else:
raise ValueError('walk_data() input_data argument must be a list or dictionary.')
|
python
|
def walk_data(input_data):
''' a generator function for retrieving data in a nested dictionary
:param input_data: dictionary or list with nested data
:return: string with dot_path, object with value of endpoint
'''
def _walk_dict(input_dict, path_to_root):
if not path_to_root:
yield '.', input_dict
for key, value in input_dict.items():
key_path = '%s.%s' % (path_to_root, key)
type_name = value.__class__.__name__
yield key_path, value
if type_name == 'dict':
for dot_path, value in _walk_dict(value, key_path):
yield dot_path, value
elif type_name == 'list':
for dot_path, value in _walk_list(value, key_path):
yield dot_path, value
def _walk_list(input_list, path_to_root):
for i in range(len(input_list)):
item_path = '%s[%s]' % (path_to_root, i)
type_name = input_list[i].__class__.__name__
yield item_path, input_list[i]
if type_name == 'dict':
for dot_path, value in _walk_dict(input_list[i], item_path):
yield dot_path, value
elif type_name == 'list':
for dot_path, value in _walk_list(input_list[i], item_path):
yield dot_path, value
if isinstance(input_data, dict):
for dot_path, value in _walk_dict(input_data, ''):
yield dot_path, value
elif isinstance(input_data, list):
for dot_path, value in _walk_list(input_data, ''):
yield dot_path, value
else:
raise ValueError('walk_data() input_data argument must be a list or dictionary.')
|
[
"def",
"walk_data",
"(",
"input_data",
")",
":",
"def",
"_walk_dict",
"(",
"input_dict",
",",
"path_to_root",
")",
":",
"if",
"not",
"path_to_root",
":",
"yield",
"'.'",
",",
"input_dict",
"for",
"key",
",",
"value",
"in",
"input_dict",
".",
"items",
"(",
")",
":",
"key_path",
"=",
"'%s.%s'",
"%",
"(",
"path_to_root",
",",
"key",
")",
"type_name",
"=",
"value",
".",
"__class__",
".",
"__name__",
"yield",
"key_path",
",",
"value",
"if",
"type_name",
"==",
"'dict'",
":",
"for",
"dot_path",
",",
"value",
"in",
"_walk_dict",
"(",
"value",
",",
"key_path",
")",
":",
"yield",
"dot_path",
",",
"value",
"elif",
"type_name",
"==",
"'list'",
":",
"for",
"dot_path",
",",
"value",
"in",
"_walk_list",
"(",
"value",
",",
"key_path",
")",
":",
"yield",
"dot_path",
",",
"value",
"def",
"_walk_list",
"(",
"input_list",
",",
"path_to_root",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"input_list",
")",
")",
":",
"item_path",
"=",
"'%s[%s]'",
"%",
"(",
"path_to_root",
",",
"i",
")",
"type_name",
"=",
"input_list",
"[",
"i",
"]",
".",
"__class__",
".",
"__name__",
"yield",
"item_path",
",",
"input_list",
"[",
"i",
"]",
"if",
"type_name",
"==",
"'dict'",
":",
"for",
"dot_path",
",",
"value",
"in",
"_walk_dict",
"(",
"input_list",
"[",
"i",
"]",
",",
"item_path",
")",
":",
"yield",
"dot_path",
",",
"value",
"elif",
"type_name",
"==",
"'list'",
":",
"for",
"dot_path",
",",
"value",
"in",
"_walk_list",
"(",
"input_list",
"[",
"i",
"]",
",",
"item_path",
")",
":",
"yield",
"dot_path",
",",
"value",
"if",
"isinstance",
"(",
"input_data",
",",
"dict",
")",
":",
"for",
"dot_path",
",",
"value",
"in",
"_walk_dict",
"(",
"input_data",
",",
"''",
")",
":",
"yield",
"dot_path",
",",
"value",
"elif",
"isinstance",
"(",
"input_data",
",",
"list",
")",
":",
"for",
"dot_path",
",",
"value",
"in",
"_walk_list",
"(",
"input_data",
",",
"''",
")",
":",
"yield",
"dot_path",
",",
"value",
"else",
":",
"raise",
"ValueError",
"(",
"'walk_data() input_data argument must be a list or dictionary.'",
")"
] |
a generator function for retrieving data in a nested dictionary
:param input_data: dictionary or list with nested data
:return: string with dot_path, object with value of endpoint
|
[
"a",
"generator",
"function",
"for",
"retrieving",
"data",
"in",
"a",
"nested",
"dictionary"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/mapping/data.py#L5-L46
|
244,155
|
collectiveacuity/labPack
|
labpack/mapping/data.py
|
transform_data
|
def transform_data(function, input_data):
''' a function to apply a function to each value in a nested dictionary
:param function: callable function with a single input of any datatype
:param input_data: dictionary or list with nested data to transform
:return: dictionary or list with data transformed by function
'''
# construct copy
try:
from copy import deepcopy
output_data = deepcopy(input_data)
except:
raise ValueError('transform_data() input_data argument cannot contain module datatypes.')
# walk over data and apply function
for dot_path, value in walk_data(input_data):
current_endpoint = output_data
segment_list = segment_path(dot_path)
segment = None
if segment_list:
for i in range(len(segment_list)):
try:
segment = int(segment_list[i])
except:
segment = segment_list[i]
if i + 1 == len(segment_list):
pass
else:
current_endpoint = current_endpoint[segment]
current_endpoint[segment] = function(value)
return output_data
|
python
|
def transform_data(function, input_data):
''' a function to apply a function to each value in a nested dictionary
:param function: callable function with a single input of any datatype
:param input_data: dictionary or list with nested data to transform
:return: dictionary or list with data transformed by function
'''
# construct copy
try:
from copy import deepcopy
output_data = deepcopy(input_data)
except:
raise ValueError('transform_data() input_data argument cannot contain module datatypes.')
# walk over data and apply function
for dot_path, value in walk_data(input_data):
current_endpoint = output_data
segment_list = segment_path(dot_path)
segment = None
if segment_list:
for i in range(len(segment_list)):
try:
segment = int(segment_list[i])
except:
segment = segment_list[i]
if i + 1 == len(segment_list):
pass
else:
current_endpoint = current_endpoint[segment]
current_endpoint[segment] = function(value)
return output_data
|
[
"def",
"transform_data",
"(",
"function",
",",
"input_data",
")",
":",
"# construct copy",
"try",
":",
"from",
"copy",
"import",
"deepcopy",
"output_data",
"=",
"deepcopy",
"(",
"input_data",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'transform_data() input_data argument cannot contain module datatypes.'",
")",
"# walk over data and apply function",
"for",
"dot_path",
",",
"value",
"in",
"walk_data",
"(",
"input_data",
")",
":",
"current_endpoint",
"=",
"output_data",
"segment_list",
"=",
"segment_path",
"(",
"dot_path",
")",
"segment",
"=",
"None",
"if",
"segment_list",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"segment_list",
")",
")",
":",
"try",
":",
"segment",
"=",
"int",
"(",
"segment_list",
"[",
"i",
"]",
")",
"except",
":",
"segment",
"=",
"segment_list",
"[",
"i",
"]",
"if",
"i",
"+",
"1",
"==",
"len",
"(",
"segment_list",
")",
":",
"pass",
"else",
":",
"current_endpoint",
"=",
"current_endpoint",
"[",
"segment",
"]",
"current_endpoint",
"[",
"segment",
"]",
"=",
"function",
"(",
"value",
")",
"return",
"output_data"
] |
a function to apply a function to each value in a nested dictionary
:param function: callable function with a single input of any datatype
:param input_data: dictionary or list with nested data to transform
:return: dictionary or list with data transformed by function
|
[
"a",
"function",
"to",
"apply",
"a",
"function",
"to",
"each",
"value",
"in",
"a",
"nested",
"dictionary"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/mapping/data.py#L69-L102
|
244,156
|
collectiveacuity/labPack
|
labpack/mapping/data.py
|
clean_data
|
def clean_data(input_value):
''' a function to transform a value into a json or yaml valid datatype
:param input_value: object of any datatype
:return: object with json valid datatype
'''
# pass normal json/yaml datatypes
if input_value.__class__.__name__ in ['bool', 'str', 'float', 'int', 'NoneType']:
pass
# transform byte data to base64 encoded string
elif isinstance(input_value, bytes):
from base64 import b64encode
input_value = b64encode(input_value).decode()
# convert tuples and sets into lists
elif isinstance(input_value, tuple) or isinstance(input_value, set):
new_list = []
new_list.extend(input_value)
input_value = transform_data(clean_data, new_list)
# recurse through dictionaries and lists
elif isinstance(input_value, dict) or isinstance(input_value, list):
input_value = transform_data(clean_data, input_value)
# convert to string all python objects and callables
else:
input_value = str(input_value)
return input_value
|
python
|
def clean_data(input_value):
''' a function to transform a value into a json or yaml valid datatype
:param input_value: object of any datatype
:return: object with json valid datatype
'''
# pass normal json/yaml datatypes
if input_value.__class__.__name__ in ['bool', 'str', 'float', 'int', 'NoneType']:
pass
# transform byte data to base64 encoded string
elif isinstance(input_value, bytes):
from base64 import b64encode
input_value = b64encode(input_value).decode()
# convert tuples and sets into lists
elif isinstance(input_value, tuple) or isinstance(input_value, set):
new_list = []
new_list.extend(input_value)
input_value = transform_data(clean_data, new_list)
# recurse through dictionaries and lists
elif isinstance(input_value, dict) or isinstance(input_value, list):
input_value = transform_data(clean_data, input_value)
# convert to string all python objects and callables
else:
input_value = str(input_value)
return input_value
|
[
"def",
"clean_data",
"(",
"input_value",
")",
":",
"# pass normal json/yaml datatypes",
"if",
"input_value",
".",
"__class__",
".",
"__name__",
"in",
"[",
"'bool'",
",",
"'str'",
",",
"'float'",
",",
"'int'",
",",
"'NoneType'",
"]",
":",
"pass",
"# transform byte data to base64 encoded string",
"elif",
"isinstance",
"(",
"input_value",
",",
"bytes",
")",
":",
"from",
"base64",
"import",
"b64encode",
"input_value",
"=",
"b64encode",
"(",
"input_value",
")",
".",
"decode",
"(",
")",
"# convert tuples and sets into lists",
"elif",
"isinstance",
"(",
"input_value",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"input_value",
",",
"set",
")",
":",
"new_list",
"=",
"[",
"]",
"new_list",
".",
"extend",
"(",
"input_value",
")",
"input_value",
"=",
"transform_data",
"(",
"clean_data",
",",
"new_list",
")",
"# recurse through dictionaries and lists",
"elif",
"isinstance",
"(",
"input_value",
",",
"dict",
")",
"or",
"isinstance",
"(",
"input_value",
",",
"list",
")",
":",
"input_value",
"=",
"transform_data",
"(",
"clean_data",
",",
"input_value",
")",
"# convert to string all python objects and callables",
"else",
":",
"input_value",
"=",
"str",
"(",
"input_value",
")",
"return",
"input_value"
] |
a function to transform a value into a json or yaml valid datatype
:param input_value: object of any datatype
:return: object with json valid datatype
|
[
"a",
"function",
"to",
"transform",
"a",
"value",
"into",
"a",
"json",
"or",
"yaml",
"valid",
"datatype"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/mapping/data.py#L104-L135
|
244,157
|
collectiveacuity/labPack
|
labpack/mapping/data.py
|
reconstruct_dict
|
def reconstruct_dict(dot_paths, values):
''' a method for reconstructing a dictionary from the values along dot paths '''
output_dict = {}
for i in range(len(dot_paths)):
if i + 1 <= len(values):
path_segments = segment_path(dot_paths[i])
current_nest = output_dict
for j in range(len(path_segments)):
key_name = path_segments[j]
try:
key_name = int(key_name)
except:
pass
if j + 1 == len(path_segments):
if isinstance(key_name, int):
current_nest.append(values[i])
else:
current_nest[key_name] = values[i]
else:
next_key = path_segments[j+1]
try:
next_key = int(next_key)
except:
pass
if isinstance(next_key, int):
if not key_name in current_nest.keys():
current_nest[key_name] = []
current_nest = current_nest[key_name]
else:
if isinstance(key_name, int):
current_nest.append({})
current_nest = current_nest[len(current_nest) - 1]
else:
if not key_name in current_nest.keys():
current_nest[key_name] = {}
current_nest = current_nest[key_name]
return output_dict
|
python
|
def reconstruct_dict(dot_paths, values):
''' a method for reconstructing a dictionary from the values along dot paths '''
output_dict = {}
for i in range(len(dot_paths)):
if i + 1 <= len(values):
path_segments = segment_path(dot_paths[i])
current_nest = output_dict
for j in range(len(path_segments)):
key_name = path_segments[j]
try:
key_name = int(key_name)
except:
pass
if j + 1 == len(path_segments):
if isinstance(key_name, int):
current_nest.append(values[i])
else:
current_nest[key_name] = values[i]
else:
next_key = path_segments[j+1]
try:
next_key = int(next_key)
except:
pass
if isinstance(next_key, int):
if not key_name in current_nest.keys():
current_nest[key_name] = []
current_nest = current_nest[key_name]
else:
if isinstance(key_name, int):
current_nest.append({})
current_nest = current_nest[len(current_nest) - 1]
else:
if not key_name in current_nest.keys():
current_nest[key_name] = {}
current_nest = current_nest[key_name]
return output_dict
|
[
"def",
"reconstruct_dict",
"(",
"dot_paths",
",",
"values",
")",
":",
"output_dict",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dot_paths",
")",
")",
":",
"if",
"i",
"+",
"1",
"<=",
"len",
"(",
"values",
")",
":",
"path_segments",
"=",
"segment_path",
"(",
"dot_paths",
"[",
"i",
"]",
")",
"current_nest",
"=",
"output_dict",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"path_segments",
")",
")",
":",
"key_name",
"=",
"path_segments",
"[",
"j",
"]",
"try",
":",
"key_name",
"=",
"int",
"(",
"key_name",
")",
"except",
":",
"pass",
"if",
"j",
"+",
"1",
"==",
"len",
"(",
"path_segments",
")",
":",
"if",
"isinstance",
"(",
"key_name",
",",
"int",
")",
":",
"current_nest",
".",
"append",
"(",
"values",
"[",
"i",
"]",
")",
"else",
":",
"current_nest",
"[",
"key_name",
"]",
"=",
"values",
"[",
"i",
"]",
"else",
":",
"next_key",
"=",
"path_segments",
"[",
"j",
"+",
"1",
"]",
"try",
":",
"next_key",
"=",
"int",
"(",
"next_key",
")",
"except",
":",
"pass",
"if",
"isinstance",
"(",
"next_key",
",",
"int",
")",
":",
"if",
"not",
"key_name",
"in",
"current_nest",
".",
"keys",
"(",
")",
":",
"current_nest",
"[",
"key_name",
"]",
"=",
"[",
"]",
"current_nest",
"=",
"current_nest",
"[",
"key_name",
"]",
"else",
":",
"if",
"isinstance",
"(",
"key_name",
",",
"int",
")",
":",
"current_nest",
".",
"append",
"(",
"{",
"}",
")",
"current_nest",
"=",
"current_nest",
"[",
"len",
"(",
"current_nest",
")",
"-",
"1",
"]",
"else",
":",
"if",
"not",
"key_name",
"in",
"current_nest",
".",
"keys",
"(",
")",
":",
"current_nest",
"[",
"key_name",
"]",
"=",
"{",
"}",
"current_nest",
"=",
"current_nest",
"[",
"key_name",
"]",
"return",
"output_dict"
] |
a method for reconstructing a dictionary from the values along dot paths
|
[
"a",
"method",
"for",
"reconstructing",
"a",
"dictionary",
"from",
"the",
"values",
"along",
"dot",
"paths"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/mapping/data.py#L137-L177
|
244,158
|
mbodenhamer/syn
|
syn/base_utils/order.py
|
topological_sorting
|
def topological_sorting(nodes, relations):
'''An implementation of Kahn's algorithm.
'''
ret = []
nodes = set(nodes) | _nodes(relations)
inc = _incoming(relations)
out = _outgoing(relations)
free = _free_nodes(nodes, inc)
while free:
n = free.pop()
ret.append(n)
out_n = list(out[n])
for m in out_n:
out[n].remove(m)
inc[m].remove(n)
if _is_free(m, inc):
free.add(m)
if not all(_is_free(node, inc) and _is_free(node, out) for node in nodes):
raise ValueError("Cycle detected")
return ret
|
python
|
def topological_sorting(nodes, relations):
'''An implementation of Kahn's algorithm.
'''
ret = []
nodes = set(nodes) | _nodes(relations)
inc = _incoming(relations)
out = _outgoing(relations)
free = _free_nodes(nodes, inc)
while free:
n = free.pop()
ret.append(n)
out_n = list(out[n])
for m in out_n:
out[n].remove(m)
inc[m].remove(n)
if _is_free(m, inc):
free.add(m)
if not all(_is_free(node, inc) and _is_free(node, out) for node in nodes):
raise ValueError("Cycle detected")
return ret
|
[
"def",
"topological_sorting",
"(",
"nodes",
",",
"relations",
")",
":",
"ret",
"=",
"[",
"]",
"nodes",
"=",
"set",
"(",
"nodes",
")",
"|",
"_nodes",
"(",
"relations",
")",
"inc",
"=",
"_incoming",
"(",
"relations",
")",
"out",
"=",
"_outgoing",
"(",
"relations",
")",
"free",
"=",
"_free_nodes",
"(",
"nodes",
",",
"inc",
")",
"while",
"free",
":",
"n",
"=",
"free",
".",
"pop",
"(",
")",
"ret",
".",
"append",
"(",
"n",
")",
"out_n",
"=",
"list",
"(",
"out",
"[",
"n",
"]",
")",
"for",
"m",
"in",
"out_n",
":",
"out",
"[",
"n",
"]",
".",
"remove",
"(",
"m",
")",
"inc",
"[",
"m",
"]",
".",
"remove",
"(",
"n",
")",
"if",
"_is_free",
"(",
"m",
",",
"inc",
")",
":",
"free",
".",
"add",
"(",
"m",
")",
"if",
"not",
"all",
"(",
"_is_free",
"(",
"node",
",",
"inc",
")",
"and",
"_is_free",
"(",
"node",
",",
"out",
")",
"for",
"node",
"in",
"nodes",
")",
":",
"raise",
"ValueError",
"(",
"\"Cycle detected\"",
")",
"return",
"ret"
] |
An implementation of Kahn's algorithm.
|
[
"An",
"implementation",
"of",
"Kahn",
"s",
"algorithm",
"."
] |
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/order.py#L47-L69
|
244,159
|
ronaldguillen/wave
|
wave/exceptions.py
|
_force_text_recursive
|
def _force_text_recursive(data):
"""
Descend into a nested data structure, forcing any
lazy translation strings into plain text.
"""
if isinstance(data, list):
ret = [
_force_text_recursive(item) for item in data
]
if isinstance(data, ReturnList):
return ReturnList(ret, serializer=data.serializer)
return data
elif isinstance(data, dict):
ret = {
key: _force_text_recursive(value)
for key, value in data.items()
}
if isinstance(data, ReturnDict):
return ReturnDict(ret, serializer=data.serializer)
return data
return force_text(data)
|
python
|
def _force_text_recursive(data):
"""
Descend into a nested data structure, forcing any
lazy translation strings into plain text.
"""
if isinstance(data, list):
ret = [
_force_text_recursive(item) for item in data
]
if isinstance(data, ReturnList):
return ReturnList(ret, serializer=data.serializer)
return data
elif isinstance(data, dict):
ret = {
key: _force_text_recursive(value)
for key, value in data.items()
}
if isinstance(data, ReturnDict):
return ReturnDict(ret, serializer=data.serializer)
return data
return force_text(data)
|
[
"def",
"_force_text_recursive",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"ret",
"=",
"[",
"_force_text_recursive",
"(",
"item",
")",
"for",
"item",
"in",
"data",
"]",
"if",
"isinstance",
"(",
"data",
",",
"ReturnList",
")",
":",
"return",
"ReturnList",
"(",
"ret",
",",
"serializer",
"=",
"data",
".",
"serializer",
")",
"return",
"data",
"elif",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"ret",
"=",
"{",
"key",
":",
"_force_text_recursive",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
"}",
"if",
"isinstance",
"(",
"data",
",",
"ReturnDict",
")",
":",
"return",
"ReturnDict",
"(",
"ret",
",",
"serializer",
"=",
"data",
".",
"serializer",
")",
"return",
"data",
"return",
"force_text",
"(",
"data",
")"
] |
Descend into a nested data structure, forcing any
lazy translation strings into plain text.
|
[
"Descend",
"into",
"a",
"nested",
"data",
"structure",
"forcing",
"any",
"lazy",
"translation",
"strings",
"into",
"plain",
"text",
"."
] |
20bb979c917f7634d8257992e6d449dc751256a9
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/exceptions.py#L20-L40
|
244,160
|
OlivierB/Bashutils
|
bashutils/colors.py
|
color_text
|
def color_text(text, color="none", bcolor="none", effect="none"):
"""
Return a formated text with bash color
"""
istty = False
try:
istty = sys.stdout.isatty()
except:
pass
if not istty or not COLOR_ON:
return text
else:
if not effect in COLOR_EFFET.keys():
effect = "none"
if not color in COLOR_CODE_TEXT.keys():
color = "none"
if not bcolor in COLOR_CODE_BG.keys():
bcolor = "none"
v_effect = COLOR_EFFET[effect]
v_color = COLOR_CODE_TEXT[color]
v_bcolor = COLOR_CODE_BG[bcolor]
if effect == "none" and color == "none" and bcolor == "none":
return text
else:
return "\033[%d;%d;%dm" % (v_effect, v_color, v_bcolor) + text + COLOR_RESET
|
python
|
def color_text(text, color="none", bcolor="none", effect="none"):
"""
Return a formated text with bash color
"""
istty = False
try:
istty = sys.stdout.isatty()
except:
pass
if not istty or not COLOR_ON:
return text
else:
if not effect in COLOR_EFFET.keys():
effect = "none"
if not color in COLOR_CODE_TEXT.keys():
color = "none"
if not bcolor in COLOR_CODE_BG.keys():
bcolor = "none"
v_effect = COLOR_EFFET[effect]
v_color = COLOR_CODE_TEXT[color]
v_bcolor = COLOR_CODE_BG[bcolor]
if effect == "none" and color == "none" and bcolor == "none":
return text
else:
return "\033[%d;%d;%dm" % (v_effect, v_color, v_bcolor) + text + COLOR_RESET
|
[
"def",
"color_text",
"(",
"text",
",",
"color",
"=",
"\"none\"",
",",
"bcolor",
"=",
"\"none\"",
",",
"effect",
"=",
"\"none\"",
")",
":",
"istty",
"=",
"False",
"try",
":",
"istty",
"=",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
"except",
":",
"pass",
"if",
"not",
"istty",
"or",
"not",
"COLOR_ON",
":",
"return",
"text",
"else",
":",
"if",
"not",
"effect",
"in",
"COLOR_EFFET",
".",
"keys",
"(",
")",
":",
"effect",
"=",
"\"none\"",
"if",
"not",
"color",
"in",
"COLOR_CODE_TEXT",
".",
"keys",
"(",
")",
":",
"color",
"=",
"\"none\"",
"if",
"not",
"bcolor",
"in",
"COLOR_CODE_BG",
".",
"keys",
"(",
")",
":",
"bcolor",
"=",
"\"none\"",
"v_effect",
"=",
"COLOR_EFFET",
"[",
"effect",
"]",
"v_color",
"=",
"COLOR_CODE_TEXT",
"[",
"color",
"]",
"v_bcolor",
"=",
"COLOR_CODE_BG",
"[",
"bcolor",
"]",
"if",
"effect",
"==",
"\"none\"",
"and",
"color",
"==",
"\"none\"",
"and",
"bcolor",
"==",
"\"none\"",
":",
"return",
"text",
"else",
":",
"return",
"\"\\033[%d;%d;%dm\"",
"%",
"(",
"v_effect",
",",
"v_color",
",",
"v_bcolor",
")",
"+",
"text",
"+",
"COLOR_RESET"
] |
Return a formated text with bash color
|
[
"Return",
"a",
"formated",
"text",
"with",
"bash",
"color"
] |
487762049f5d09f14f8a6c764bc0a823f332d8a1
|
https://github.com/OlivierB/Bashutils/blob/487762049f5d09f14f8a6c764bc0a823f332d8a1/bashutils/colors.py#L68-L94
|
244,161
|
radjkarl/fancyTools
|
fancytools/os/countLines.py
|
countLines
|
def countLines(filename, buf_size=1048576):
"""
fast counting to the lines of a given filename
through only reading out a limited buffer
"""
f = open(filename)
try:
lines = 1
read_f = f.read # loop optimization
buf = read_f(buf_size)
# Empty file
if not buf:
return 0
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
return lines
finally:
f.close()
|
python
|
def countLines(filename, buf_size=1048576):
"""
fast counting to the lines of a given filename
through only reading out a limited buffer
"""
f = open(filename)
try:
lines = 1
read_f = f.read # loop optimization
buf = read_f(buf_size)
# Empty file
if not buf:
return 0
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
return lines
finally:
f.close()
|
[
"def",
"countLines",
"(",
"filename",
",",
"buf_size",
"=",
"1048576",
")",
":",
"f",
"=",
"open",
"(",
"filename",
")",
"try",
":",
"lines",
"=",
"1",
"read_f",
"=",
"f",
".",
"read",
"# loop optimization",
"buf",
"=",
"read_f",
"(",
"buf_size",
")",
"# Empty file",
"if",
"not",
"buf",
":",
"return",
"0",
"while",
"buf",
":",
"lines",
"+=",
"buf",
".",
"count",
"(",
"'\\n'",
")",
"buf",
"=",
"read_f",
"(",
"buf_size",
")",
"return",
"lines",
"finally",
":",
"f",
".",
"close",
"(",
")"
] |
fast counting to the lines of a given filename
through only reading out a limited buffer
|
[
"fast",
"counting",
"to",
"the",
"lines",
"of",
"a",
"given",
"filename",
"through",
"only",
"reading",
"out",
"a",
"limited",
"buffer"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/os/countLines.py#L5-L23
|
244,162
|
pybel/pybel-artifactory
|
src/pybel_artifactory/hashing.py
|
_names_to_bytes
|
def _names_to_bytes(names):
"""Reproducibly converts an iterable of strings to bytes
:param iter[str] names: An iterable of strings
:rtype: bytes
"""
names = sorted(names)
names_bytes = json.dumps(names).encode('utf8')
return names_bytes
|
python
|
def _names_to_bytes(names):
"""Reproducibly converts an iterable of strings to bytes
:param iter[str] names: An iterable of strings
:rtype: bytes
"""
names = sorted(names)
names_bytes = json.dumps(names).encode('utf8')
return names_bytes
|
[
"def",
"_names_to_bytes",
"(",
"names",
")",
":",
"names",
"=",
"sorted",
"(",
"names",
")",
"names_bytes",
"=",
"json",
".",
"dumps",
"(",
"names",
")",
".",
"encode",
"(",
"'utf8'",
")",
"return",
"names_bytes"
] |
Reproducibly converts an iterable of strings to bytes
:param iter[str] names: An iterable of strings
:rtype: bytes
|
[
"Reproducibly",
"converts",
"an",
"iterable",
"of",
"strings",
"to",
"bytes"
] |
720107780a59be2ef08885290dfa519b1da62871
|
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/hashing.py#L19-L27
|
244,163
|
pybel/pybel-artifactory
|
src/pybel_artifactory/hashing.py
|
hash_names
|
def hash_names(names, hash_function=None):
"""Return the hash of an iterable of strings, or a dict if multiple hash functions given.
:param iter[str] names: An iterable of strings
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :func:`hashlib.sha512`
:rtype: str
"""
hash_function = hash_function or hashlib.md5
names_bytes = _names_to_bytes(names)
return hash_function(names_bytes).hexdigest()
|
python
|
def hash_names(names, hash_function=None):
"""Return the hash of an iterable of strings, or a dict if multiple hash functions given.
:param iter[str] names: An iterable of strings
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :func:`hashlib.sha512`
:rtype: str
"""
hash_function = hash_function or hashlib.md5
names_bytes = _names_to_bytes(names)
return hash_function(names_bytes).hexdigest()
|
[
"def",
"hash_names",
"(",
"names",
",",
"hash_function",
"=",
"None",
")",
":",
"hash_function",
"=",
"hash_function",
"or",
"hashlib",
".",
"md5",
"names_bytes",
"=",
"_names_to_bytes",
"(",
"names",
")",
"return",
"hash_function",
"(",
"names_bytes",
")",
".",
"hexdigest",
"(",
")"
] |
Return the hash of an iterable of strings, or a dict if multiple hash functions given.
:param iter[str] names: An iterable of strings
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :func:`hashlib.sha512`
:rtype: str
|
[
"Return",
"the",
"hash",
"of",
"an",
"iterable",
"of",
"strings",
"or",
"a",
"dict",
"if",
"multiple",
"hash",
"functions",
"given",
"."
] |
720107780a59be2ef08885290dfa519b1da62871
|
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/hashing.py#L30-L39
|
244,164
|
pybel/pybel-artifactory
|
src/pybel_artifactory/hashing.py
|
get_bel_resource_hash
|
def get_bel_resource_hash(location, hash_function=None):
"""Get a BEL resource file and returns its semantic hash.
:param str location: URL of a resource
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :code:`hashlib.sha512`
:return: The hexadecimal digest of the hash of the values in the resource
:rtype: str
:raises: pybel.resources.exc.ResourceError
"""
resource = get_bel_resource(location)
return hash_names(
resource['Values'],
hash_function=hash_function
)
|
python
|
def get_bel_resource_hash(location, hash_function=None):
"""Get a BEL resource file and returns its semantic hash.
:param str location: URL of a resource
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :code:`hashlib.sha512`
:return: The hexadecimal digest of the hash of the values in the resource
:rtype: str
:raises: pybel.resources.exc.ResourceError
"""
resource = get_bel_resource(location)
return hash_names(
resource['Values'],
hash_function=hash_function
)
|
[
"def",
"get_bel_resource_hash",
"(",
"location",
",",
"hash_function",
"=",
"None",
")",
":",
"resource",
"=",
"get_bel_resource",
"(",
"location",
")",
"return",
"hash_names",
"(",
"resource",
"[",
"'Values'",
"]",
",",
"hash_function",
"=",
"hash_function",
")"
] |
Get a BEL resource file and returns its semantic hash.
:param str location: URL of a resource
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :code:`hashlib.sha512`
:return: The hexadecimal digest of the hash of the values in the resource
:rtype: str
:raises: pybel.resources.exc.ResourceError
|
[
"Get",
"a",
"BEL",
"resource",
"file",
"and",
"returns",
"its",
"semantic",
"hash",
"."
] |
720107780a59be2ef08885290dfa519b1da62871
|
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/hashing.py#L42-L56
|
244,165
|
dankelley/nota
|
nota/notaclass.py
|
Nota.book_name
|
def book_name(self, number):
'''Return name of book with given index.'''
try:
name = self.cur.execute("SELECT name FROM book WHERE number = ?;", [number]).fetchone()
except:
self.error("cannot look up name of book number %s" % number)
return(str(name[0]))
|
python
|
def book_name(self, number):
'''Return name of book with given index.'''
try:
name = self.cur.execute("SELECT name FROM book WHERE number = ?;", [number]).fetchone()
except:
self.error("cannot look up name of book number %s" % number)
return(str(name[0]))
|
[
"def",
"book_name",
"(",
"self",
",",
"number",
")",
":",
"try",
":",
"name",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT name FROM book WHERE number = ?;\"",
",",
"[",
"number",
"]",
")",
".",
"fetchone",
"(",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"cannot look up name of book number %s\"",
"%",
"number",
")",
"return",
"(",
"str",
"(",
"name",
"[",
"0",
"]",
")",
")"
] |
Return name of book with given index.
|
[
"Return",
"name",
"of",
"book",
"with",
"given",
"index",
"."
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L271-L277
|
244,166
|
dankelley/nota
|
nota/notaclass.py
|
Nota.book_number
|
def book_number(self, name):
'''Return number of book with given name.'''
try:
number = self.cur.execute("SELECT number FROM book WHERE name= ?;", [name]).fetchone()
except:
self.error("cannot look up number of book with name %s" % name)
return(number)
|
python
|
def book_number(self, name):
'''Return number of book with given name.'''
try:
number = self.cur.execute("SELECT number FROM book WHERE name= ?;", [name]).fetchone()
except:
self.error("cannot look up number of book with name %s" % name)
return(number)
|
[
"def",
"book_number",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"number",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT number FROM book WHERE name= ?;\"",
",",
"[",
"name",
"]",
")",
".",
"fetchone",
"(",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"cannot look up number of book with name %s\"",
"%",
"name",
")",
"return",
"(",
"number",
")"
] |
Return number of book with given name.
|
[
"Return",
"number",
"of",
"book",
"with",
"given",
"name",
"."
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L280-L286
|
244,167
|
dankelley/nota
|
nota/notaclass.py
|
Nota.list_books
|
def list_books(self):
''' Return the list of book names '''
names = []
try:
for n in self.cur.execute("SELECT name FROM book;").fetchall():
names.extend(n)
except:
self.error("ERROR: cannot find database table 'book'")
return(names)
|
python
|
def list_books(self):
''' Return the list of book names '''
names = []
try:
for n in self.cur.execute("SELECT name FROM book;").fetchall():
names.extend(n)
except:
self.error("ERROR: cannot find database table 'book'")
return(names)
|
[
"def",
"list_books",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"try",
":",
"for",
"n",
"in",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT name FROM book;\"",
")",
".",
"fetchall",
"(",
")",
":",
"names",
".",
"extend",
"(",
"n",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"ERROR: cannot find database table 'book'\"",
")",
"return",
"(",
"names",
")"
] |
Return the list of book names
|
[
"Return",
"the",
"list",
"of",
"book",
"names"
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L289-L297
|
244,168
|
dankelley/nota
|
nota/notaclass.py
|
Nota.create_book
|
def create_book(self, name):
"""Create a new book"""
name = name.strip()
if not len(name):
self.error("Cannot have a blank book name")
# The next could be relaxed, if users want commas in book names, but
# I prefer to keep it, in case later there could be a syntax for multiple
# book names, using comma.
if name.find(",") >= 0:
self.error("Cannot have a ',' in a book name")
existing = self.list_books()
nexisting = len(existing)
if name in existing:
self.error("Already have a book named '%s'" % name)
try:
self.cur.execute("INSERT INTO book (number, name) VALUES(?, ?);", (nexisting, name))
self.con.commit()
except:
self.fyi("Error adding a book named '%s'" % name)
|
python
|
def create_book(self, name):
"""Create a new book"""
name = name.strip()
if not len(name):
self.error("Cannot have a blank book name")
# The next could be relaxed, if users want commas in book names, but
# I prefer to keep it, in case later there could be a syntax for multiple
# book names, using comma.
if name.find(",") >= 0:
self.error("Cannot have a ',' in a book name")
existing = self.list_books()
nexisting = len(existing)
if name in existing:
self.error("Already have a book named '%s'" % name)
try:
self.cur.execute("INSERT INTO book (number, name) VALUES(?, ?);", (nexisting, name))
self.con.commit()
except:
self.fyi("Error adding a book named '%s'" % name)
|
[
"def",
"create_book",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"name",
".",
"strip",
"(",
")",
"if",
"not",
"len",
"(",
"name",
")",
":",
"self",
".",
"error",
"(",
"\"Cannot have a blank book name\"",
")",
"# The next could be relaxed, if users want commas in book names, but",
"# I prefer to keep it, in case later there could be a syntax for multiple",
"# book names, using comma.",
"if",
"name",
".",
"find",
"(",
"\",\"",
")",
">=",
"0",
":",
"self",
".",
"error",
"(",
"\"Cannot have a ',' in a book name\"",
")",
"existing",
"=",
"self",
".",
"list_books",
"(",
")",
"nexisting",
"=",
"len",
"(",
"existing",
")",
"if",
"name",
"in",
"existing",
":",
"self",
".",
"error",
"(",
"\"Already have a book named '%s'\"",
"%",
"name",
")",
"try",
":",
"self",
".",
"cur",
".",
"execute",
"(",
"\"INSERT INTO book (number, name) VALUES(?, ?);\"",
",",
"(",
"nexisting",
",",
"name",
")",
")",
"self",
".",
"con",
".",
"commit",
"(",
")",
"except",
":",
"self",
".",
"fyi",
"(",
"\"Error adding a book named '%s'\"",
"%",
"name",
")"
] |
Create a new book
|
[
"Create",
"a",
"new",
"book"
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L300-L318
|
244,169
|
dankelley/nota
|
nota/notaclass.py
|
Nota.initialize
|
def initialize(self, author=""):
''' Initialize the database. This is dangerous since it removes any
existing content.'''
self.cur.execute("CREATE TABLE version(major, minor);")
self.cur.execute("INSERT INTO version(major, minor) VALUES (?,?);",
(self.appversion[0], self.appversion[1]))
#20150314 self.cur.execute("CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, in_trash DEFAULT 0);")
self.cur.execute("CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, book DEFAULT 1);")
self.cur.execute("CREATE TABLE author(authorId integer primary key autoincrement, name, nickname);")
self.cur.execute("CREATE TABLE alias(aliasId integer primary key autoincrement, item, alias);")
self.cur.execute("CREATE TABLE keyword(keywordId integer primary key autoincrement, keyword);")
self.cur.execute("CREATE TABLE notekeyword(notekeywordId integer primary key autoincrement, noteid, keywordid);")
self.cur.execute("CREATE TABLE book(bookId integer primary key autoincrement, number, name DEFAULT '');")
self.cur.execute("INSERT INTO book(number, name) VALUES (0, 'Trash');")
self.cur.execute("INSERT INTO book(number, name) VALUES (1, 'Default');")
self.con.commit()
|
python
|
def initialize(self, author=""):
''' Initialize the database. This is dangerous since it removes any
existing content.'''
self.cur.execute("CREATE TABLE version(major, minor);")
self.cur.execute("INSERT INTO version(major, minor) VALUES (?,?);",
(self.appversion[0], self.appversion[1]))
#20150314 self.cur.execute("CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, in_trash DEFAULT 0);")
self.cur.execute("CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, book DEFAULT 1);")
self.cur.execute("CREATE TABLE author(authorId integer primary key autoincrement, name, nickname);")
self.cur.execute("CREATE TABLE alias(aliasId integer primary key autoincrement, item, alias);")
self.cur.execute("CREATE TABLE keyword(keywordId integer primary key autoincrement, keyword);")
self.cur.execute("CREATE TABLE notekeyword(notekeywordId integer primary key autoincrement, noteid, keywordid);")
self.cur.execute("CREATE TABLE book(bookId integer primary key autoincrement, number, name DEFAULT '');")
self.cur.execute("INSERT INTO book(number, name) VALUES (0, 'Trash');")
self.cur.execute("INSERT INTO book(number, name) VALUES (1, 'Default');")
self.con.commit()
|
[
"def",
"initialize",
"(",
"self",
",",
"author",
"=",
"\"\"",
")",
":",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE version(major, minor);\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"INSERT INTO version(major, minor) VALUES (?,?);\"",
",",
"(",
"self",
".",
"appversion",
"[",
"0",
"]",
",",
"self",
".",
"appversion",
"[",
"1",
"]",
")",
")",
"#20150314 self.cur.execute(\"CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, in_trash DEFAULT 0);\")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE note(noteId integer primary key autoincrement, authorId, date, modified, due, title, content, hash, privacy DEFAULT 0, book DEFAULT 1);\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE author(authorId integer primary key autoincrement, name, nickname);\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE alias(aliasId integer primary key autoincrement, item, alias);\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE keyword(keywordId integer primary key autoincrement, keyword);\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE notekeyword(notekeywordId integer primary key autoincrement, noteid, keywordid);\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"CREATE TABLE book(bookId integer primary key autoincrement, number, name DEFAULT '');\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"INSERT INTO book(number, name) VALUES (0, 'Trash');\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"INSERT INTO book(number, name) VALUES (1, 'Default');\"",
")",
"self",
".",
"con",
".",
"commit",
"(",
")"
] |
Initialize the database. This is dangerous since it removes any
existing content.
|
[
"Initialize",
"the",
"database",
".",
"This",
"is",
"dangerous",
"since",
"it",
"removes",
"any",
"existing",
"content",
"."
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L369-L384
|
244,170
|
dankelley/nota
|
nota/notaclass.py
|
Nota.keyword_hookup
|
def keyword_hookup(self, noteId, keywords):
'''
Unhook existing cross-linking entries.
'''
try:
self.cur.execute("DELETE FROM notekeyword WHERE noteid=?", [noteId])
except:
self.error("ERROR: cannot unhook previous keywords")
# Now, hook up new the entries, one by one.
for keyword in keywords:
keyword = keyword.decode('utf-8')
self.fyi(" inserting keyword:", keyword)
# Make sure the keyword table contains the word in question.
keywordId = self.con.execute("SELECT keywordId FROM keyword WHERE keyword = ?;", [keyword]).fetchone()
try:
if keywordId:
self.fyi(" (existing keyword with id: %s)" % keywordId)
keywordId = keywordId[0]
else:
self.fyi(" (new keyword)")
self.cur.execute("INSERT INTO keyword(keyword) VALUES (?);", [keyword])
keywordId = self.cur.lastrowid
# Finally, do the actual hookup for this word.
self.con.execute("INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)", [noteId, keywordId])
except:
self.error("error hooking up keyword '%s'" % keyword)
self.con.commit()
|
python
|
def keyword_hookup(self, noteId, keywords):
'''
Unhook existing cross-linking entries.
'''
try:
self.cur.execute("DELETE FROM notekeyword WHERE noteid=?", [noteId])
except:
self.error("ERROR: cannot unhook previous keywords")
# Now, hook up new the entries, one by one.
for keyword in keywords:
keyword = keyword.decode('utf-8')
self.fyi(" inserting keyword:", keyword)
# Make sure the keyword table contains the word in question.
keywordId = self.con.execute("SELECT keywordId FROM keyword WHERE keyword = ?;", [keyword]).fetchone()
try:
if keywordId:
self.fyi(" (existing keyword with id: %s)" % keywordId)
keywordId = keywordId[0]
else:
self.fyi(" (new keyword)")
self.cur.execute("INSERT INTO keyword(keyword) VALUES (?);", [keyword])
keywordId = self.cur.lastrowid
# Finally, do the actual hookup for this word.
self.con.execute("INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)", [noteId, keywordId])
except:
self.error("error hooking up keyword '%s'" % keyword)
self.con.commit()
|
[
"def",
"keyword_hookup",
"(",
"self",
",",
"noteId",
",",
"keywords",
")",
":",
"try",
":",
"self",
".",
"cur",
".",
"execute",
"(",
"\"DELETE FROM notekeyword WHERE noteid=?\"",
",",
"[",
"noteId",
"]",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"ERROR: cannot unhook previous keywords\"",
")",
"# Now, hook up new the entries, one by one.",
"for",
"keyword",
"in",
"keywords",
":",
"keyword",
"=",
"keyword",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"fyi",
"(",
"\" inserting keyword:\"",
",",
"keyword",
")",
"# Make sure the keyword table contains the word in question.",
"keywordId",
"=",
"self",
".",
"con",
".",
"execute",
"(",
"\"SELECT keywordId FROM keyword WHERE keyword = ?;\"",
",",
"[",
"keyword",
"]",
")",
".",
"fetchone",
"(",
")",
"try",
":",
"if",
"keywordId",
":",
"self",
".",
"fyi",
"(",
"\" (existing keyword with id: %s)\"",
"%",
"keywordId",
")",
"keywordId",
"=",
"keywordId",
"[",
"0",
"]",
"else",
":",
"self",
".",
"fyi",
"(",
"\" (new keyword)\"",
")",
"self",
".",
"cur",
".",
"execute",
"(",
"\"INSERT INTO keyword(keyword) VALUES (?);\"",
",",
"[",
"keyword",
"]",
")",
"keywordId",
"=",
"self",
".",
"cur",
".",
"lastrowid",
"# Finally, do the actual hookup for this word.",
"self",
".",
"con",
".",
"execute",
"(",
"\"INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)\"",
",",
"[",
"noteId",
",",
"keywordId",
"]",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"error hooking up keyword '%s'\"",
"%",
"keyword",
")",
"self",
".",
"con",
".",
"commit",
"(",
")"
] |
Unhook existing cross-linking entries.
|
[
"Unhook",
"existing",
"cross",
"-",
"linking",
"entries",
"."
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L505-L531
|
244,171
|
dankelley/nota
|
nota/notaclass.py
|
Nota.list_keywords
|
def list_keywords(self):
''' Return the list of keywords '''
names = []
try:
for n in self.cur.execute("SELECT keyword FROM keyword;").fetchall():
# Strip out leading and trailing whitespaces (can be artifacts of old data)
k = n[0].strip()
if len(k):
names.extend([k])
except:
self.error("ERROR: cannot find database table 'keyword'")
names = list(set(names)) # remove duplicates
names = sorted(names, key=lambda s: s.lower())
return(names)
|
python
|
def list_keywords(self):
''' Return the list of keywords '''
names = []
try:
for n in self.cur.execute("SELECT keyword FROM keyword;").fetchall():
# Strip out leading and trailing whitespaces (can be artifacts of old data)
k = n[0].strip()
if len(k):
names.extend([k])
except:
self.error("ERROR: cannot find database table 'keyword'")
names = list(set(names)) # remove duplicates
names = sorted(names, key=lambda s: s.lower())
return(names)
|
[
"def",
"list_keywords",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"try",
":",
"for",
"n",
"in",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT keyword FROM keyword;\"",
")",
".",
"fetchall",
"(",
")",
":",
"# Strip out leading and trailing whitespaces (can be artifacts of old data)",
"k",
"=",
"n",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"k",
")",
":",
"names",
".",
"extend",
"(",
"[",
"k",
"]",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"ERROR: cannot find database table 'keyword'\"",
")",
"names",
"=",
"list",
"(",
"set",
"(",
"names",
")",
")",
"# remove duplicates",
"names",
"=",
"sorted",
"(",
"names",
",",
"key",
"=",
"lambda",
"s",
":",
"s",
".",
"lower",
"(",
")",
")",
"return",
"(",
"names",
")"
] |
Return the list of keywords
|
[
"Return",
"the",
"list",
"of",
"keywords"
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L534-L547
|
244,172
|
dankelley/nota
|
nota/notaclass.py
|
Nota.find_recent
|
def find_recent(self, nrecent=4):
'''Find recent non-trashed notes'''
try:
rows = self.cur.execute("SELECT noteId FROM note WHERE book > 0 ORDER BY date DESC LIMIT %d;"%nrecent).fetchall()
except:
self.error("nota.find_recent() cannot look up note list")
# Possibly save time by finding IDs first.
noteIds = []
for r in rows:
noteIds.append(r[0],)
self.fyi("noteIds: %s" % noteIds)
rval = []
for n in noteIds:
note = None
try:
note = self.cur.execute("SELECT noteId, date, title, content, hash, book FROM note WHERE noteId = ?;", [n]).fetchone()
except:
self.warning("Problem extracting note %s from database for recent-list" % n)
next
if note:
keywordIds = []
keywordIds.extend(self.con.execute("SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;", [n]))
keywords = []
for k in keywordIds:
keywords.append(self.cur.execute("SELECT keyword FROM keyword WHERE keywordId=?;", k).fetchone()[0])
rval.append({"noteId":note[0], "date":note[1], "title":note[2], "keywords":keywords,
"content":note[3], "hash":note[4], "book":note[5]})
return rval
|
python
|
def find_recent(self, nrecent=4):
'''Find recent non-trashed notes'''
try:
rows = self.cur.execute("SELECT noteId FROM note WHERE book > 0 ORDER BY date DESC LIMIT %d;"%nrecent).fetchall()
except:
self.error("nota.find_recent() cannot look up note list")
# Possibly save time by finding IDs first.
noteIds = []
for r in rows:
noteIds.append(r[0],)
self.fyi("noteIds: %s" % noteIds)
rval = []
for n in noteIds:
note = None
try:
note = self.cur.execute("SELECT noteId, date, title, content, hash, book FROM note WHERE noteId = ?;", [n]).fetchone()
except:
self.warning("Problem extracting note %s from database for recent-list" % n)
next
if note:
keywordIds = []
keywordIds.extend(self.con.execute("SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;", [n]))
keywords = []
for k in keywordIds:
keywords.append(self.cur.execute("SELECT keyword FROM keyword WHERE keywordId=?;", k).fetchone()[0])
rval.append({"noteId":note[0], "date":note[1], "title":note[2], "keywords":keywords,
"content":note[3], "hash":note[4], "book":note[5]})
return rval
|
[
"def",
"find_recent",
"(",
"self",
",",
"nrecent",
"=",
"4",
")",
":",
"try",
":",
"rows",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT noteId FROM note WHERE book > 0 ORDER BY date DESC LIMIT %d;\"",
"%",
"nrecent",
")",
".",
"fetchall",
"(",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"nota.find_recent() cannot look up note list\"",
")",
"# Possibly save time by finding IDs first.",
"noteIds",
"=",
"[",
"]",
"for",
"r",
"in",
"rows",
":",
"noteIds",
".",
"append",
"(",
"r",
"[",
"0",
"]",
",",
")",
"self",
".",
"fyi",
"(",
"\"noteIds: %s\"",
"%",
"noteIds",
")",
"rval",
"=",
"[",
"]",
"for",
"n",
"in",
"noteIds",
":",
"note",
"=",
"None",
"try",
":",
"note",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT noteId, date, title, content, hash, book FROM note WHERE noteId = ?;\"",
",",
"[",
"n",
"]",
")",
".",
"fetchone",
"(",
")",
"except",
":",
"self",
".",
"warning",
"(",
"\"Problem extracting note %s from database for recent-list\"",
"%",
"n",
")",
"next",
"if",
"note",
":",
"keywordIds",
"=",
"[",
"]",
"keywordIds",
".",
"extend",
"(",
"self",
".",
"con",
".",
"execute",
"(",
"\"SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;\"",
",",
"[",
"n",
"]",
")",
")",
"keywords",
"=",
"[",
"]",
"for",
"k",
"in",
"keywordIds",
":",
"keywords",
".",
"append",
"(",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT keyword FROM keyword WHERE keywordId=?;\"",
",",
"k",
")",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
")",
"rval",
".",
"append",
"(",
"{",
"\"noteId\"",
":",
"note",
"[",
"0",
"]",
",",
"\"date\"",
":",
"note",
"[",
"1",
"]",
",",
"\"title\"",
":",
"note",
"[",
"2",
"]",
",",
"\"keywords\"",
":",
"keywords",
",",
"\"content\"",
":",
"note",
"[",
"3",
"]",
",",
"\"hash\"",
":",
"note",
"[",
"4",
"]",
",",
"\"book\"",
":",
"note",
"[",
"5",
"]",
"}",
")",
"return",
"rval"
] |
Find recent non-trashed notes
|
[
"Find",
"recent",
"non",
"-",
"trashed",
"notes"
] |
245cd575db60daaea6eebd5edc1d048c5fe23c9b
|
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L918-L945
|
244,173
|
dasevilla/rovi-python
|
roviclient/video.py
|
VideoApi._cosmoid_request
|
def _cosmoid_request(self, resource, cosmoid, **kwargs):
"""
Maps to the Generic API method for requests who's only parameter is ``cosmoid``
"""
params = {
'cosmoid': cosmoid,
}
params.update(kwargs)
return self.make_request(resource, params)
|
python
|
def _cosmoid_request(self, resource, cosmoid, **kwargs):
"""
Maps to the Generic API method for requests who's only parameter is ``cosmoid``
"""
params = {
'cosmoid': cosmoid,
}
params.update(kwargs)
return self.make_request(resource, params)
|
[
"def",
"_cosmoid_request",
"(",
"self",
",",
"resource",
",",
"cosmoid",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'cosmoid'",
":",
"cosmoid",
",",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"make_request",
"(",
"resource",
",",
"params",
")"
] |
Maps to the Generic API method for requests who's only parameter is ``cosmoid``
|
[
"Maps",
"to",
"the",
"Generic",
"API",
"method",
"for",
"requests",
"who",
"s",
"only",
"parameter",
"is",
"cosmoid"
] |
46039d6ebfcf2ff20b4edb4636cb972682cf6af4
|
https://github.com/dasevilla/rovi-python/blob/46039d6ebfcf2ff20b4edb4636cb972682cf6af4/roviclient/video.py#L15-L25
|
244,174
|
dasevilla/rovi-python
|
roviclient/video.py
|
VideoApi.season_info
|
def season_info(self, cosmoid, season, **kwargs):
"""
Returns information about a season of a TV series
Maps to the `season info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:Season>`_ API method.
"""
resource = 'season/%d/info' % season
return self._cosmoid_request(resource, cosmoid, **kwargs)
params = {
'cosmoid': cosmoid,
}
params.update(kwargs)
return self.make_request(resource, params)
|
python
|
def season_info(self, cosmoid, season, **kwargs):
"""
Returns information about a season of a TV series
Maps to the `season info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:Season>`_ API method.
"""
resource = 'season/%d/info' % season
return self._cosmoid_request(resource, cosmoid, **kwargs)
params = {
'cosmoid': cosmoid,
}
params.update(kwargs)
return self.make_request(resource, params)
|
[
"def",
"season_info",
"(",
"self",
",",
"cosmoid",
",",
"season",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"'season/%d/info'",
"%",
"season",
"return",
"self",
".",
"_cosmoid_request",
"(",
"resource",
",",
"cosmoid",
",",
"*",
"*",
"kwargs",
")",
"params",
"=",
"{",
"'cosmoid'",
":",
"cosmoid",
",",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"make_request",
"(",
"resource",
",",
"params",
")"
] |
Returns information about a season of a TV series
Maps to the `season info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:Season>`_ API method.
|
[
"Returns",
"information",
"about",
"a",
"season",
"of",
"a",
"TV",
"series"
] |
46039d6ebfcf2ff20b4edb4636cb972682cf6af4
|
https://github.com/dasevilla/rovi-python/blob/46039d6ebfcf2ff20b4edb4636cb972682cf6af4/roviclient/video.py#L36-L51
|
244,175
|
dasevilla/rovi-python
|
roviclient/video.py
|
VideoApi.episode_info
|
def episode_info(self, cosmoid, season, episode, **kwargs):
"""
Returns information about an episode in a television series
Maps to the `episode info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:SeasonEpisode>`_ API method.
"""
resource = 'season/%d/episode/%d/info' % (season, episode)
return self._cosmoid_request(resource, cosmoid, **kwargs)
|
python
|
def episode_info(self, cosmoid, season, episode, **kwargs):
"""
Returns information about an episode in a television series
Maps to the `episode info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:SeasonEpisode>`_ API method.
"""
resource = 'season/%d/episode/%d/info' % (season, episode)
return self._cosmoid_request(resource, cosmoid, **kwargs)
|
[
"def",
"episode_info",
"(",
"self",
",",
"cosmoid",
",",
"season",
",",
"episode",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"'season/%d/episode/%d/info'",
"%",
"(",
"season",
",",
"episode",
")",
"return",
"self",
".",
"_cosmoid_request",
"(",
"resource",
",",
"cosmoid",
",",
"*",
"*",
"kwargs",
")"
] |
Returns information about an episode in a television series
Maps to the `episode info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:SeasonEpisode>`_ API method.
|
[
"Returns",
"information",
"about",
"an",
"episode",
"in",
"a",
"television",
"series"
] |
46039d6ebfcf2ff20b4edb4636cb972682cf6af4
|
https://github.com/dasevilla/rovi-python/blob/46039d6ebfcf2ff20b4edb4636cb972682cf6af4/roviclient/video.py#L53-L62
|
244,176
|
rikrd/inspire
|
inspirespeech/__init__.py
|
_load_zip_wav
|
def _load_zip_wav(zfile, offset=0, count=None):
"""Load a wav file into an array from frame start to fram end
:param zfile: ZipExtFile file-like object from where to load the audio
:param offset: First sample to load
:param count: Maximum number of samples to load
:return: The audio samples in a numpy array of floats
"""
buf = StringIO.StringIO(zfile.read())
sample_rate, audio = wavfile.read(buf)
audio = audio[offset:]
if count:
audio = audio[:count]
return sample_rate, audio
|
python
|
def _load_zip_wav(zfile, offset=0, count=None):
"""Load a wav file into an array from frame start to fram end
:param zfile: ZipExtFile file-like object from where to load the audio
:param offset: First sample to load
:param count: Maximum number of samples to load
:return: The audio samples in a numpy array of floats
"""
buf = StringIO.StringIO(zfile.read())
sample_rate, audio = wavfile.read(buf)
audio = audio[offset:]
if count:
audio = audio[:count]
return sample_rate, audio
|
[
"def",
"_load_zip_wav",
"(",
"zfile",
",",
"offset",
"=",
"0",
",",
"count",
"=",
"None",
")",
":",
"buf",
"=",
"StringIO",
".",
"StringIO",
"(",
"zfile",
".",
"read",
"(",
")",
")",
"sample_rate",
",",
"audio",
"=",
"wavfile",
".",
"read",
"(",
"buf",
")",
"audio",
"=",
"audio",
"[",
"offset",
":",
"]",
"if",
"count",
":",
"audio",
"=",
"audio",
"[",
":",
"count",
"]",
"return",
"sample_rate",
",",
"audio"
] |
Load a wav file into an array from frame start to fram end
:param zfile: ZipExtFile file-like object from where to load the audio
:param offset: First sample to load
:param count: Maximum number of samples to load
:return: The audio samples in a numpy array of floats
|
[
"Load",
"a",
"wav",
"file",
"into",
"an",
"array",
"from",
"frame",
"start",
"to",
"fram",
"end"
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L39-L56
|
244,177
|
rikrd/inspire
|
inspirespeech/__init__.py
|
get_edit_scripts
|
def get_edit_scripts(pron_a, pron_b, edit_costs=(1.0, 1.0, 1.0)):
"""Get the edit scripts to transform between two given pronunciations.
:param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme
:param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme
:param edit_costs: Costs of insert, replace and delete respectively
:return: List of edit scripts. Each edit script is represented as a list of operations,
where each operation is a dictionary.
"""
op_costs = {'insert': lambda x: edit_costs[0],
'match': lambda x, y: 0 if x == y else edit_costs[1],
'delete': lambda x: edit_costs[2]}
distance, scripts, costs, ops = edit_distance.best_transforms(pron_a, pron_b, op_costs=op_costs)
return [full_edit_script(script.to_primitive()) for script in scripts]
|
python
|
def get_edit_scripts(pron_a, pron_b, edit_costs=(1.0, 1.0, 1.0)):
"""Get the edit scripts to transform between two given pronunciations.
:param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme
:param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme
:param edit_costs: Costs of insert, replace and delete respectively
:return: List of edit scripts. Each edit script is represented as a list of operations,
where each operation is a dictionary.
"""
op_costs = {'insert': lambda x: edit_costs[0],
'match': lambda x, y: 0 if x == y else edit_costs[1],
'delete': lambda x: edit_costs[2]}
distance, scripts, costs, ops = edit_distance.best_transforms(pron_a, pron_b, op_costs=op_costs)
return [full_edit_script(script.to_primitive()) for script in scripts]
|
[
"def",
"get_edit_scripts",
"(",
"pron_a",
",",
"pron_b",
",",
"edit_costs",
"=",
"(",
"1.0",
",",
"1.0",
",",
"1.0",
")",
")",
":",
"op_costs",
"=",
"{",
"'insert'",
":",
"lambda",
"x",
":",
"edit_costs",
"[",
"0",
"]",
",",
"'match'",
":",
"lambda",
"x",
",",
"y",
":",
"0",
"if",
"x",
"==",
"y",
"else",
"edit_costs",
"[",
"1",
"]",
",",
"'delete'",
":",
"lambda",
"x",
":",
"edit_costs",
"[",
"2",
"]",
"}",
"distance",
",",
"scripts",
",",
"costs",
",",
"ops",
"=",
"edit_distance",
".",
"best_transforms",
"(",
"pron_a",
",",
"pron_b",
",",
"op_costs",
"=",
"op_costs",
")",
"return",
"[",
"full_edit_script",
"(",
"script",
".",
"to_primitive",
"(",
")",
")",
"for",
"script",
"in",
"scripts",
"]"
] |
Get the edit scripts to transform between two given pronunciations.
:param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme
:param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme
:param edit_costs: Costs of insert, replace and delete respectively
:return: List of edit scripts. Each edit script is represented as a list of operations,
where each operation is a dictionary.
|
[
"Get",
"the",
"edit",
"scripts",
"to",
"transform",
"between",
"two",
"given",
"pronunciations",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L712-L727
|
244,178
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission._what_default
|
def _what_default(self, pronunciation):
"""Provide the default prediction of the what task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param pronunciation: The list or array of confusion probabilities at each index
"""
token_default = self['metadata']['token_default']['what']
index_count = 2*len(pronunciation) + 1
predictions = {}
for i in range(index_count):
index_predictions = {}
if i % 2 == 0:
index_predictions.update(token_default['0'])
else:
presented_phoneme = pronunciation[int((i-1)/2)]
index_predictions[presented_phoneme] = token_default['1']['=']
index_predictions['*'] = token_default['1']['*']
index_predictions[''] = token_default['1']['']
predictions['{}'.format(i)] = index_predictions
return predictions
|
python
|
def _what_default(self, pronunciation):
"""Provide the default prediction of the what task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param pronunciation: The list or array of confusion probabilities at each index
"""
token_default = self['metadata']['token_default']['what']
index_count = 2*len(pronunciation) + 1
predictions = {}
for i in range(index_count):
index_predictions = {}
if i % 2 == 0:
index_predictions.update(token_default['0'])
else:
presented_phoneme = pronunciation[int((i-1)/2)]
index_predictions[presented_phoneme] = token_default['1']['=']
index_predictions['*'] = token_default['1']['*']
index_predictions[''] = token_default['1']['']
predictions['{}'.format(i)] = index_predictions
return predictions
|
[
"def",
"_what_default",
"(",
"self",
",",
"pronunciation",
")",
":",
"token_default",
"=",
"self",
"[",
"'metadata'",
"]",
"[",
"'token_default'",
"]",
"[",
"'what'",
"]",
"index_count",
"=",
"2",
"*",
"len",
"(",
"pronunciation",
")",
"+",
"1",
"predictions",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"index_count",
")",
":",
"index_predictions",
"=",
"{",
"}",
"if",
"i",
"%",
"2",
"==",
"0",
":",
"index_predictions",
".",
"update",
"(",
"token_default",
"[",
"'0'",
"]",
")",
"else",
":",
"presented_phoneme",
"=",
"pronunciation",
"[",
"int",
"(",
"(",
"i",
"-",
"1",
")",
"/",
"2",
")",
"]",
"index_predictions",
"[",
"presented_phoneme",
"]",
"=",
"token_default",
"[",
"'1'",
"]",
"[",
"'='",
"]",
"index_predictions",
"[",
"'*'",
"]",
"=",
"token_default",
"[",
"'1'",
"]",
"[",
"'*'",
"]",
"index_predictions",
"[",
"''",
"]",
"=",
"token_default",
"[",
"'1'",
"]",
"[",
"''",
"]",
"predictions",
"[",
"'{}'",
".",
"format",
"(",
"i",
")",
"]",
"=",
"index_predictions",
"return",
"predictions"
] |
Provide the default prediction of the what task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param pronunciation: The list or array of confusion probabilities at each index
|
[
"Provide",
"the",
"default",
"prediction",
"of",
"the",
"what",
"task",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L259-L285
|
244,179
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.where_task
|
def where_task(self, token_id, presented_pronunciation, confusion_probability):
"""Provide the prediction of the where task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is being provided
:param confusion_probability: The list or array of confusion probabilities at each index
"""
self['tokens'].setdefault(token_id, {}) \
.setdefault('where', self._where_default(presented_pronunciation))
if confusion_probability is not None:
self['tokens'][token_id]['where'] = list(confusion_probability)
|
python
|
def where_task(self, token_id, presented_pronunciation, confusion_probability):
"""Provide the prediction of the where task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is being provided
:param confusion_probability: The list or array of confusion probabilities at each index
"""
self['tokens'].setdefault(token_id, {}) \
.setdefault('where', self._where_default(presented_pronunciation))
if confusion_probability is not None:
self['tokens'][token_id]['where'] = list(confusion_probability)
|
[
"def",
"where_task",
"(",
"self",
",",
"token_id",
",",
"presented_pronunciation",
",",
"confusion_probability",
")",
":",
"self",
"[",
"'tokens'",
"]",
".",
"setdefault",
"(",
"token_id",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"'where'",
",",
"self",
".",
"_where_default",
"(",
"presented_pronunciation",
")",
")",
"if",
"confusion_probability",
"is",
"not",
"None",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'where'",
"]",
"=",
"list",
"(",
"confusion_probability",
")"
] |
Provide the prediction of the where task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is being provided
:param confusion_probability: The list or array of confusion probabilities at each index
|
[
"Provide",
"the",
"prediction",
"of",
"the",
"where",
"task",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L311-L323
|
244,180
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.what_task
|
def what_task(self, token_id, presented_pronunciation, index, phonemes, phonemes_probability,
warn=True, default=True):
"""Provide the prediction of the what task.
This function is used to predict the probability of a given phoneme being reported at a given index
for a given token.
:param token_id: The token for which the prediction is provided
:param index: The index of the token for which the prediction is provided
:param phonemes: The phoneme or phoneme sequence for which the prediction is being made
(as a space separated string)
:param phonemes_probability: The probability of the phoneme or phoneme sequence
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
"""
if phonemes_probability is not None and not 0. < phonemes_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to phonemes [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(phonemes_probability,
phonemes,
token_id))
default_preds = self._what_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('what', default_preds)
if index is not None:
self['tokens'][token_id]['what'].setdefault(str(index), {})
if phonemes is not None:
if phonemes_probability is not None and index is not None:
self['tokens'][token_id]['what'][str(index)][phonemes] = phonemes_probability
else:
if index is not None:
if phonemes in default_preds[str(index)]:
self['tokens'][token_id]['what'][str(index)][phonemes] = default_preds[str(index)][phonemes]
else:
self['tokens'][token_id]['what'][str(index)].pop(phonemes)
else:
if str(index) in default_preds:
self['tokens'][token_id]['what'][str(index)] = default_preds[str(index)]
else:
self['tokens'][token_id]['what'].pop(str(index))
|
python
|
def what_task(self, token_id, presented_pronunciation, index, phonemes, phonemes_probability,
warn=True, default=True):
"""Provide the prediction of the what task.
This function is used to predict the probability of a given phoneme being reported at a given index
for a given token.
:param token_id: The token for which the prediction is provided
:param index: The index of the token for which the prediction is provided
:param phonemes: The phoneme or phoneme sequence for which the prediction is being made
(as a space separated string)
:param phonemes_probability: The probability of the phoneme or phoneme sequence
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
"""
if phonemes_probability is not None and not 0. < phonemes_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to phonemes [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(phonemes_probability,
phonemes,
token_id))
default_preds = self._what_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('what', default_preds)
if index is not None:
self['tokens'][token_id]['what'].setdefault(str(index), {})
if phonemes is not None:
if phonemes_probability is not None and index is not None:
self['tokens'][token_id]['what'][str(index)][phonemes] = phonemes_probability
else:
if index is not None:
if phonemes in default_preds[str(index)]:
self['tokens'][token_id]['what'][str(index)][phonemes] = default_preds[str(index)][phonemes]
else:
self['tokens'][token_id]['what'][str(index)].pop(phonemes)
else:
if str(index) in default_preds:
self['tokens'][token_id]['what'][str(index)] = default_preds[str(index)]
else:
self['tokens'][token_id]['what'].pop(str(index))
|
[
"def",
"what_task",
"(",
"self",
",",
"token_id",
",",
"presented_pronunciation",
",",
"index",
",",
"phonemes",
",",
"phonemes_probability",
",",
"warn",
"=",
"True",
",",
"default",
"=",
"True",
")",
":",
"if",
"phonemes_probability",
"is",
"not",
"None",
"and",
"not",
"0.",
"<",
"phonemes_probability",
"<",
"1.",
"and",
"warn",
":",
"logging",
".",
"warning",
"(",
"'Setting a probability of [{}] to phonemes [{}] for token [{}].\\n '",
"'Using probabilities of 0.0 or 1.0 '",
"'may lead to likelihoods of -Infinity'",
".",
"format",
"(",
"phonemes_probability",
",",
"phonemes",
",",
"token_id",
")",
")",
"default_preds",
"=",
"self",
".",
"_what_default",
"(",
"presented_pronunciation",
")",
"if",
"default",
"else",
"{",
"}",
"self",
"[",
"'tokens'",
"]",
".",
"setdefault",
"(",
"token_id",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"'what'",
",",
"default_preds",
")",
"if",
"index",
"is",
"not",
"None",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'what'",
"]",
".",
"setdefault",
"(",
"str",
"(",
"index",
")",
",",
"{",
"}",
")",
"if",
"phonemes",
"is",
"not",
"None",
":",
"if",
"phonemes_probability",
"is",
"not",
"None",
"and",
"index",
"is",
"not",
"None",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'what'",
"]",
"[",
"str",
"(",
"index",
")",
"]",
"[",
"phonemes",
"]",
"=",
"phonemes_probability",
"else",
":",
"if",
"index",
"is",
"not",
"None",
":",
"if",
"phonemes",
"in",
"default_preds",
"[",
"str",
"(",
"index",
")",
"]",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'what'",
"]",
"[",
"str",
"(",
"index",
")",
"]",
"[",
"phonemes",
"]",
"=",
"default_preds",
"[",
"str",
"(",
"index",
")",
"]",
"[",
"phonemes",
"]",
"else",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'what'",
"]",
"[",
"str",
"(",
"index",
")",
"]",
".",
"pop",
"(",
"phonemes",
")",
"else",
":",
"if",
"str",
"(",
"index",
")",
"in",
"default_preds",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'what'",
"]",
"[",
"str",
"(",
"index",
")",
"]",
"=",
"default_preds",
"[",
"str",
"(",
"index",
")",
"]",
"else",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'what'",
"]",
".",
"pop",
"(",
"str",
"(",
"index",
")",
")"
] |
Provide the prediction of the what task.
This function is used to predict the probability of a given phoneme being reported at a given index
for a given token.
:param token_id: The token for which the prediction is provided
:param index: The index of the token for which the prediction is provided
:param phonemes: The phoneme or phoneme sequence for which the prediction is being made
(as a space separated string)
:param phonemes_probability: The probability of the phoneme or phoneme sequence
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
|
[
"Provide",
"the",
"prediction",
"of",
"the",
"what",
"task",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L325-L371
|
244,181
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.full_task
|
def full_task(self, token_id, presented_pronunciation, pronunciation, pronunciation_probability,
warn=True, default=True):
"""Provide the prediction of the full task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is provided
:param pronunciation: The pronunciation for which the prediction is being made (as a list of strings
or space separated string)
:param pronunciation_probability: The probability of the pronunciation for the given token
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
"""
if pronunciation_probability is not None and not 0. < pronunciation_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to pronunciation [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(pronunciation_probability,
pronunciation,
token_id))
key = pronunciation
if isinstance(key, list):
if not all([isinstance(phoneme, basestring) for phoneme in key]):
raise ValueError('The pronunciation must be of type string (a sequence of space separated phonemes) '
'or of type list (containing phonemes of type strings).'
'User supplied: {}'.format(key))
key = ' '.join(pronunciation)
default_preds = self._full_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('full', default_preds)
if key is not None:
if pronunciation_probability is not None:
self['tokens'][token_id]['full'][key] = pronunciation_probability
else:
if key in default_preds:
self['tokens'][token_id]['full'][key] = default_preds[key]
else:
self['tokens'][token_id]['full'].pop(key)
|
python
|
def full_task(self, token_id, presented_pronunciation, pronunciation, pronunciation_probability,
warn=True, default=True):
"""Provide the prediction of the full task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is provided
:param pronunciation: The pronunciation for which the prediction is being made (as a list of strings
or space separated string)
:param pronunciation_probability: The probability of the pronunciation for the given token
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
"""
if pronunciation_probability is not None and not 0. < pronunciation_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to pronunciation [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(pronunciation_probability,
pronunciation,
token_id))
key = pronunciation
if isinstance(key, list):
if not all([isinstance(phoneme, basestring) for phoneme in key]):
raise ValueError('The pronunciation must be of type string (a sequence of space separated phonemes) '
'or of type list (containing phonemes of type strings).'
'User supplied: {}'.format(key))
key = ' '.join(pronunciation)
default_preds = self._full_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('full', default_preds)
if key is not None:
if pronunciation_probability is not None:
self['tokens'][token_id]['full'][key] = pronunciation_probability
else:
if key in default_preds:
self['tokens'][token_id]['full'][key] = default_preds[key]
else:
self['tokens'][token_id]['full'].pop(key)
|
[
"def",
"full_task",
"(",
"self",
",",
"token_id",
",",
"presented_pronunciation",
",",
"pronunciation",
",",
"pronunciation_probability",
",",
"warn",
"=",
"True",
",",
"default",
"=",
"True",
")",
":",
"if",
"pronunciation_probability",
"is",
"not",
"None",
"and",
"not",
"0.",
"<",
"pronunciation_probability",
"<",
"1.",
"and",
"warn",
":",
"logging",
".",
"warning",
"(",
"'Setting a probability of [{}] to pronunciation [{}] for token [{}].\\n '",
"'Using probabilities of 0.0 or 1.0 '",
"'may lead to likelihoods of -Infinity'",
".",
"format",
"(",
"pronunciation_probability",
",",
"pronunciation",
",",
"token_id",
")",
")",
"key",
"=",
"pronunciation",
"if",
"isinstance",
"(",
"key",
",",
"list",
")",
":",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"phoneme",
",",
"basestring",
")",
"for",
"phoneme",
"in",
"key",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'The pronunciation must be of type string (a sequence of space separated phonemes) '",
"'or of type list (containing phonemes of type strings).'",
"'User supplied: {}'",
".",
"format",
"(",
"key",
")",
")",
"key",
"=",
"' '",
".",
"join",
"(",
"pronunciation",
")",
"default_preds",
"=",
"self",
".",
"_full_default",
"(",
"presented_pronunciation",
")",
"if",
"default",
"else",
"{",
"}",
"self",
"[",
"'tokens'",
"]",
".",
"setdefault",
"(",
"token_id",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"'full'",
",",
"default_preds",
")",
"if",
"key",
"is",
"not",
"None",
":",
"if",
"pronunciation_probability",
"is",
"not",
"None",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'full'",
"]",
"[",
"key",
"]",
"=",
"pronunciation_probability",
"else",
":",
"if",
"key",
"in",
"default_preds",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'full'",
"]",
"[",
"key",
"]",
"=",
"default_preds",
"[",
"key",
"]",
"else",
":",
"self",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"[",
"'full'",
"]",
".",
"pop",
"(",
"key",
")"
] |
Provide the prediction of the full task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is provided
:param pronunciation: The pronunciation for which the prediction is being made (as a list of strings
or space separated string)
:param pronunciation_probability: The probability of the pronunciation for the given token
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
|
[
"Provide",
"the",
"prediction",
"of",
"the",
"full",
"task",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L373-L417
|
244,182
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.load
|
def load(fileobj):
"""Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
submission = Submission(metadata=json.loads(z.readline()))
for line in z:
token_id, token = json.loads(line)
submission['tokens'][token_id] = token
return submission
|
python
|
def load(fileobj):
"""Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
submission = Submission(metadata=json.loads(z.readline()))
for line in z:
token_id, token = json.loads(line)
submission['tokens'][token_id] = token
return submission
|
[
"def",
"load",
"(",
"fileobj",
")",
":",
"with",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"fileobj",
",",
"mode",
"=",
"'r'",
")",
"as",
"z",
":",
"submission",
"=",
"Submission",
"(",
"metadata",
"=",
"json",
".",
"loads",
"(",
"z",
".",
"readline",
"(",
")",
")",
")",
"for",
"line",
"in",
"z",
":",
"token_id",
",",
"token",
"=",
"json",
".",
"loads",
"(",
"line",
")",
"submission",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"=",
"token",
"return",
"submission"
] |
Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
|
[
"Load",
"the",
"submission",
"from",
"a",
"file",
"-",
"like",
"object"
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L481-L494
|
244,183
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.load_metadata
|
def load_metadata(fileobj):
"""Load the submission from a file.
:param filename: where to load the submission from
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
return json.loads(z.readline())
|
python
|
def load_metadata(fileobj):
"""Load the submission from a file.
:param filename: where to load the submission from
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
return json.loads(z.readline())
|
[
"def",
"load_metadata",
"(",
"fileobj",
")",
":",
"with",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"fileobj",
",",
"mode",
"=",
"'r'",
")",
"as",
"z",
":",
"return",
"json",
".",
"loads",
"(",
"z",
".",
"readline",
"(",
")",
")"
] |
Load the submission from a file.
:param filename: where to load the submission from
|
[
"Load",
"the",
"submission",
"from",
"a",
"file",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L502-L508
|
244,184
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.submit
|
def submit(self, password=''):
"""Submits the participation to the web site.
The passwords is sent as plain text.
:return: the evaluation results.
"""
url = '{}/api/submit'.format(BASE_URL)
try:
r = requests.post(url,
data=self.dumps(),
headers={'content-type': 'application/json'},
auth=(self['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response)
|
python
|
def submit(self, password=''):
"""Submits the participation to the web site.
The passwords is sent as plain text.
:return: the evaluation results.
"""
url = '{}/api/submit'.format(BASE_URL)
try:
r = requests.post(url,
data=self.dumps(),
headers={'content-type': 'application/json'},
auth=(self['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response)
|
[
"def",
"submit",
"(",
"self",
",",
"password",
"=",
"''",
")",
":",
"url",
"=",
"'{}/api/submit'",
".",
"format",
"(",
"BASE_URL",
")",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"self",
".",
"dumps",
"(",
")",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
",",
"auth",
"=",
"(",
"self",
"[",
"'metadata'",
"]",
"[",
"'email'",
"]",
",",
"password",
")",
")",
"response",
"=",
"r",
".",
"json",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"'Error while submitting the participation. {}'",
".",
"format",
"(",
"e",
")",
")",
"return",
"Job",
"(",
")",
"if",
"'error'",
"in",
"response",
":",
"logging",
".",
"error",
"(",
"'Error while processing the participation. {}'",
".",
"format",
"(",
"response",
"[",
"'error'",
"]",
")",
")",
"return",
"Job",
"(",
")",
"return",
"Job",
"(",
"response",
")"
] |
Submits the participation to the web site.
The passwords is sent as plain text.
:return: the evaluation results.
|
[
"Submits",
"the",
"participation",
"to",
"the",
"web",
"site",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L525-L550
|
244,185
|
rikrd/inspire
|
inspirespeech/__init__.py
|
Submission.evaluate
|
def evaluate(self, password=''):
"""Evaluates the development set.
The passwords is sent as plain text.
:return: the evaluation results.
"""
# Make a copy only keeping the development set
dev_submission = self
if self['metadata'].get('evaluation_setting', {}).get('development_set', None):
dev_submission = copy.deepcopy(self)
dev_submission['tokens'] = {token_id: token for token_id, token in self['tokens'].items()
if token_id in self['metadata']['evaluation_setting']['development_set']}
url = '{}/api/evaluate'.format(BASE_URL)
try:
r = requests.post(url,
data=dev_submission.dumps(),
headers={'content-type': 'application/json'},
auth=(dev_submission['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response)
|
python
|
def evaluate(self, password=''):
"""Evaluates the development set.
The passwords is sent as plain text.
:return: the evaluation results.
"""
# Make a copy only keeping the development set
dev_submission = self
if self['metadata'].get('evaluation_setting', {}).get('development_set', None):
dev_submission = copy.deepcopy(self)
dev_submission['tokens'] = {token_id: token for token_id, token in self['tokens'].items()
if token_id in self['metadata']['evaluation_setting']['development_set']}
url = '{}/api/evaluate'.format(BASE_URL)
try:
r = requests.post(url,
data=dev_submission.dumps(),
headers={'content-type': 'application/json'},
auth=(dev_submission['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response)
|
[
"def",
"evaluate",
"(",
"self",
",",
"password",
"=",
"''",
")",
":",
"# Make a copy only keeping the development set",
"dev_submission",
"=",
"self",
"if",
"self",
"[",
"'metadata'",
"]",
".",
"get",
"(",
"'evaluation_setting'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'development_set'",
",",
"None",
")",
":",
"dev_submission",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"dev_submission",
"[",
"'tokens'",
"]",
"=",
"{",
"token_id",
":",
"token",
"for",
"token_id",
",",
"token",
"in",
"self",
"[",
"'tokens'",
"]",
".",
"items",
"(",
")",
"if",
"token_id",
"in",
"self",
"[",
"'metadata'",
"]",
"[",
"'evaluation_setting'",
"]",
"[",
"'development_set'",
"]",
"}",
"url",
"=",
"'{}/api/evaluate'",
".",
"format",
"(",
"BASE_URL",
")",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"dev_submission",
".",
"dumps",
"(",
")",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
",",
"auth",
"=",
"(",
"dev_submission",
"[",
"'metadata'",
"]",
"[",
"'email'",
"]",
",",
"password",
")",
")",
"response",
"=",
"r",
".",
"json",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"'Error while submitting the participation. {}'",
".",
"format",
"(",
"e",
")",
")",
"return",
"Job",
"(",
")",
"if",
"'error'",
"in",
"response",
":",
"logging",
".",
"error",
"(",
"'Error while processing the participation. {}'",
".",
"format",
"(",
"response",
"[",
"'error'",
"]",
")",
")",
"return",
"Job",
"(",
")",
"return",
"Job",
"(",
"response",
")"
] |
Evaluates the development set.
The passwords is sent as plain text.
:return: the evaluation results.
|
[
"Evaluates",
"the",
"development",
"set",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L552-L584
|
244,186
|
stephanepechard/projy
|
projy/collectors/AuthorMailCollector.py
|
AuthorMailCollector.author_mail_from_git
|
def author_mail_from_git(self):
""" Get the author mail from git information. """
try:
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.email"], stdout=PIPE)
stdoutdata = cmd.communicate()
if (stdoutdata[0]):
self.author_mail = stdoutdata[0].rstrip(os.linesep)
except ImportError:
pass
except CalledProcessError:
pass
except OSError:
pass
return self.author_mail
|
python
|
def author_mail_from_git(self):
""" Get the author mail from git information. """
try:
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.email"], stdout=PIPE)
stdoutdata = cmd.communicate()
if (stdoutdata[0]):
self.author_mail = stdoutdata[0].rstrip(os.linesep)
except ImportError:
pass
except CalledProcessError:
pass
except OSError:
pass
return self.author_mail
|
[
"def",
"author_mail_from_git",
"(",
"self",
")",
":",
"try",
":",
"# launch git command and get answer",
"cmd",
"=",
"Popen",
"(",
"[",
"\"git\"",
",",
"\"config\"",
",",
"\"--get\"",
",",
"\"user.email\"",
"]",
",",
"stdout",
"=",
"PIPE",
")",
"stdoutdata",
"=",
"cmd",
".",
"communicate",
"(",
")",
"if",
"(",
"stdoutdata",
"[",
"0",
"]",
")",
":",
"self",
".",
"author_mail",
"=",
"stdoutdata",
"[",
"0",
"]",
".",
"rstrip",
"(",
"os",
".",
"linesep",
")",
"except",
"ImportError",
":",
"pass",
"except",
"CalledProcessError",
":",
"pass",
"except",
"OSError",
":",
"pass",
"return",
"self",
".",
"author_mail"
] |
Get the author mail from git information.
|
[
"Get",
"the",
"author",
"mail",
"from",
"git",
"information",
"."
] |
3146b0e3c207b977e1b51fcb33138746dae83c23
|
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/collectors/AuthorMailCollector.py#L24-L39
|
244,187
|
stephanepechard/projy
|
projy/collectors/AuthorMailCollector.py
|
AuthorMailCollector.author_mail_from_system
|
def author_mail_from_system(self):
""" Get the author mail from system information.
It is probably often innacurate.
"""
self.author_mail = getpass.getuser() + '@' + socket.gethostname()
return self.author_mail
|
python
|
def author_mail_from_system(self):
""" Get the author mail from system information.
It is probably often innacurate.
"""
self.author_mail = getpass.getuser() + '@' + socket.gethostname()
return self.author_mail
|
[
"def",
"author_mail_from_system",
"(",
"self",
")",
":",
"self",
".",
"author_mail",
"=",
"getpass",
".",
"getuser",
"(",
")",
"+",
"'@'",
"+",
"socket",
".",
"gethostname",
"(",
")",
"return",
"self",
".",
"author_mail"
] |
Get the author mail from system information.
It is probably often innacurate.
|
[
"Get",
"the",
"author",
"mail",
"from",
"system",
"information",
".",
"It",
"is",
"probably",
"often",
"innacurate",
"."
] |
3146b0e3c207b977e1b51fcb33138746dae83c23
|
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/collectors/AuthorMailCollector.py#L42-L47
|
244,188
|
emory-libraries/eulcommon
|
eulcommon/djangoextras/http/decorators.py
|
content_negotiation
|
def content_negotiation(formats, default_type='text/html'):
"""
Provides basic content negotiation and returns a view method based on the
best match of content types as indicated in formats.
:param formats: dictionary of content types and corresponding methods
:param default_type: string the decorated method is the return type for.
Example usage::
def rdf_view(request, arg):
return RDF_RESPONSE
@content_negotiation({'application/rdf+xml': rdf_view})
def html_view(request, arg):
return HTML_RESPONSE
The above example would return the rdf_view on a request type of
``application/rdf+xml`` and the normal view for anything else.
Any :class:`django.http.HttpResponse` returned by the view method chosen
by content negotiation will have a 'Vary: Accept' HTTP header added.
**NOTE:** Some web browsers do content negotiation poorly, requesting
``application/xml`` when what they really want is ``application/xhtml+xml`` or
``text/html``. When this type of Accept request is detected, the default type
will be returned rather than the best match that would be determined by parsing
the Accept string properly (since in some cases the best match is
``application/xml``, which could return non-html content inappropriate for
display in a web browser).
"""
def _decorator(view_method):
@wraps(view_method)
def _wrapped(request, *args, **kwargs):
# Changed this to be a value passed as a method argument defaulting
# to text/html instead so it's more flexible.
# default_type = 'text/html' # If not specificied assume HTML request.
# Add text/html for the original method if not already included.
if default_type not in formats:
formats[default_type] = view_method
try:
req_type = request.META['HTTP_ACCEPT']
# If this request is coming from a browser like that, just
# give them our default type instead of honoring the actual best match
# (see note above for more detail)
if '*/*' in req_type:
req_type = default_type
except KeyError:
req_type = default_type
# Get the best match for the content type requested.
content_type = mimeparse.best_match(formats.keys(),
req_type)
# Return the view matching content type or the original view
# if no match.
if not content_type or content_type not in formats:
response = view_method(request, *args, **kwargs)
else:
response = formats[content_type](request, *args, **kwargs)
# set a Vary header to indicate content may vary based on Accept header
if isinstance(response, HttpResponse): # views should return HttpResponse objects, but check to be sure
# note: using the same utility method used by django's vary_on_headers decorator
patch_vary_headers(response, ['Accept'])
return response
return _wrapped
return _decorator
|
python
|
def content_negotiation(formats, default_type='text/html'):
"""
Provides basic content negotiation and returns a view method based on the
best match of content types as indicated in formats.
:param formats: dictionary of content types and corresponding methods
:param default_type: string the decorated method is the return type for.
Example usage::
def rdf_view(request, arg):
return RDF_RESPONSE
@content_negotiation({'application/rdf+xml': rdf_view})
def html_view(request, arg):
return HTML_RESPONSE
The above example would return the rdf_view on a request type of
``application/rdf+xml`` and the normal view for anything else.
Any :class:`django.http.HttpResponse` returned by the view method chosen
by content negotiation will have a 'Vary: Accept' HTTP header added.
**NOTE:** Some web browsers do content negotiation poorly, requesting
``application/xml`` when what they really want is ``application/xhtml+xml`` or
``text/html``. When this type of Accept request is detected, the default type
will be returned rather than the best match that would be determined by parsing
the Accept string properly (since in some cases the best match is
``application/xml``, which could return non-html content inappropriate for
display in a web browser).
"""
def _decorator(view_method):
@wraps(view_method)
def _wrapped(request, *args, **kwargs):
# Changed this to be a value passed as a method argument defaulting
# to text/html instead so it's more flexible.
# default_type = 'text/html' # If not specificied assume HTML request.
# Add text/html for the original method if not already included.
if default_type not in formats:
formats[default_type] = view_method
try:
req_type = request.META['HTTP_ACCEPT']
# If this request is coming from a browser like that, just
# give them our default type instead of honoring the actual best match
# (see note above for more detail)
if '*/*' in req_type:
req_type = default_type
except KeyError:
req_type = default_type
# Get the best match for the content type requested.
content_type = mimeparse.best_match(formats.keys(),
req_type)
# Return the view matching content type or the original view
# if no match.
if not content_type or content_type not in formats:
response = view_method(request, *args, **kwargs)
else:
response = formats[content_type](request, *args, **kwargs)
# set a Vary header to indicate content may vary based on Accept header
if isinstance(response, HttpResponse): # views should return HttpResponse objects, but check to be sure
# note: using the same utility method used by django's vary_on_headers decorator
patch_vary_headers(response, ['Accept'])
return response
return _wrapped
return _decorator
|
[
"def",
"content_negotiation",
"(",
"formats",
",",
"default_type",
"=",
"'text/html'",
")",
":",
"def",
"_decorator",
"(",
"view_method",
")",
":",
"@",
"wraps",
"(",
"view_method",
")",
"def",
"_wrapped",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Changed this to be a value passed as a method argument defaulting",
"# to text/html instead so it's more flexible.",
"# default_type = 'text/html' # If not specificied assume HTML request.",
"# Add text/html for the original method if not already included.",
"if",
"default_type",
"not",
"in",
"formats",
":",
"formats",
"[",
"default_type",
"]",
"=",
"view_method",
"try",
":",
"req_type",
"=",
"request",
".",
"META",
"[",
"'HTTP_ACCEPT'",
"]",
"# If this request is coming from a browser like that, just",
"# give them our default type instead of honoring the actual best match",
"# (see note above for more detail)",
"if",
"'*/*'",
"in",
"req_type",
":",
"req_type",
"=",
"default_type",
"except",
"KeyError",
":",
"req_type",
"=",
"default_type",
"# Get the best match for the content type requested.",
"content_type",
"=",
"mimeparse",
".",
"best_match",
"(",
"formats",
".",
"keys",
"(",
")",
",",
"req_type",
")",
"# Return the view matching content type or the original view",
"# if no match.",
"if",
"not",
"content_type",
"or",
"content_type",
"not",
"in",
"formats",
":",
"response",
"=",
"view_method",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"response",
"=",
"formats",
"[",
"content_type",
"]",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# set a Vary header to indicate content may vary based on Accept header ",
"if",
"isinstance",
"(",
"response",
",",
"HttpResponse",
")",
":",
"# views should return HttpResponse objects, but check to be sure",
"# note: using the same utility method used by django's vary_on_headers decorator",
"patch_vary_headers",
"(",
"response",
",",
"[",
"'Accept'",
"]",
")",
"return",
"response",
"return",
"_wrapped",
"return",
"_decorator"
] |
Provides basic content negotiation and returns a view method based on the
best match of content types as indicated in formats.
:param formats: dictionary of content types and corresponding methods
:param default_type: string the decorated method is the return type for.
Example usage::
def rdf_view(request, arg):
return RDF_RESPONSE
@content_negotiation({'application/rdf+xml': rdf_view})
def html_view(request, arg):
return HTML_RESPONSE
The above example would return the rdf_view on a request type of
``application/rdf+xml`` and the normal view for anything else.
Any :class:`django.http.HttpResponse` returned by the view method chosen
by content negotiation will have a 'Vary: Accept' HTTP header added.
**NOTE:** Some web browsers do content negotiation poorly, requesting
``application/xml`` when what they really want is ``application/xhtml+xml`` or
``text/html``. When this type of Accept request is detected, the default type
will be returned rather than the best match that would be determined by parsing
the Accept string properly (since in some cases the best match is
``application/xml``, which could return non-html content inappropriate for
display in a web browser).
|
[
"Provides",
"basic",
"content",
"negotiation",
"and",
"returns",
"a",
"view",
"method",
"based",
"on",
"the",
"best",
"match",
"of",
"content",
"types",
"as",
"indicated",
"in",
"formats",
"."
] |
dc63a9b3b5e38205178235e0d716d1b28158d3a9
|
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/djangoextras/http/decorators.py#L23-L94
|
244,189
|
slarse/clanimtk
|
clanimtk/decorator.py
|
animation
|
def animation(frame_function: types.FrameFunction) -> types.Animation:
"""Turn a FrameFunction into an Animation.
Args:
frame_function: A function that returns a FrameGenerator.
Returns:
an Animation decorator function.
"""
animation_ = core.Animation(frame_function)
@functools.wraps(frame_function)
def wrapper(*args, **kwargs):
return animation_(*args, **kwargs)
return wrapper
|
python
|
def animation(frame_function: types.FrameFunction) -> types.Animation:
"""Turn a FrameFunction into an Animation.
Args:
frame_function: A function that returns a FrameGenerator.
Returns:
an Animation decorator function.
"""
animation_ = core.Animation(frame_function)
@functools.wraps(frame_function)
def wrapper(*args, **kwargs):
return animation_(*args, **kwargs)
return wrapper
|
[
"def",
"animation",
"(",
"frame_function",
":",
"types",
".",
"FrameFunction",
")",
"->",
"types",
".",
"Animation",
":",
"animation_",
"=",
"core",
".",
"Animation",
"(",
"frame_function",
")",
"@",
"functools",
".",
"wraps",
"(",
"frame_function",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"animation_",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
Turn a FrameFunction into an Animation.
Args:
frame_function: A function that returns a FrameGenerator.
Returns:
an Animation decorator function.
|
[
"Turn",
"a",
"FrameFunction",
"into",
"an",
"Animation",
"."
] |
cb93d2e914c3ecc4e0007745ff4d546318cf3902
|
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/decorator.py#L18-L33
|
244,190
|
slarse/clanimtk
|
clanimtk/decorator.py
|
multiline_frame_function
|
def multiline_frame_function(frame_function: types.FrameFunction,
height: int,
offset: int = 0,
*args,
**kwargs) -> types.FrameGenerator:
"""Multiline a singlelined frame function. Simply chains several frame
generators together, and applies the specified offset to each one.
Args:
frame_function: A function that returns a singleline FrameGenerator.
height: The amount of frame generators to stack vertically (determines
the height in characters).
offset: An offset to apply to each successive generator. If the offset
is 2, then the first generator starts at frame 0, the second at frame
2, the third at frame 4, and so on.
Returns:
a multiline version fo the generator returned by frame_function
"""
frame_generators = []
for i in range(height):
frame_generators.append(frame_function(*args, **kwargs))
for _ in range(i * offset): # advance animation
frame_generators[i].__next__()
frame_gen = concatechain(*frame_generators, separator='\n')
yield from frame_gen
|
python
|
def multiline_frame_function(frame_function: types.FrameFunction,
height: int,
offset: int = 0,
*args,
**kwargs) -> types.FrameGenerator:
"""Multiline a singlelined frame function. Simply chains several frame
generators together, and applies the specified offset to each one.
Args:
frame_function: A function that returns a singleline FrameGenerator.
height: The amount of frame generators to stack vertically (determines
the height in characters).
offset: An offset to apply to each successive generator. If the offset
is 2, then the first generator starts at frame 0, the second at frame
2, the third at frame 4, and so on.
Returns:
a multiline version fo the generator returned by frame_function
"""
frame_generators = []
for i in range(height):
frame_generators.append(frame_function(*args, **kwargs))
for _ in range(i * offset): # advance animation
frame_generators[i].__next__()
frame_gen = concatechain(*frame_generators, separator='\n')
yield from frame_gen
|
[
"def",
"multiline_frame_function",
"(",
"frame_function",
":",
"types",
".",
"FrameFunction",
",",
"height",
":",
"int",
",",
"offset",
":",
"int",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"types",
".",
"FrameGenerator",
":",
"frame_generators",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"height",
")",
":",
"frame_generators",
".",
"append",
"(",
"frame_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"for",
"_",
"in",
"range",
"(",
"i",
"*",
"offset",
")",
":",
"# advance animation",
"frame_generators",
"[",
"i",
"]",
".",
"__next__",
"(",
")",
"frame_gen",
"=",
"concatechain",
"(",
"*",
"frame_generators",
",",
"separator",
"=",
"'\\n'",
")",
"yield",
"from",
"frame_gen"
] |
Multiline a singlelined frame function. Simply chains several frame
generators together, and applies the specified offset to each one.
Args:
frame_function: A function that returns a singleline FrameGenerator.
height: The amount of frame generators to stack vertically (determines
the height in characters).
offset: An offset to apply to each successive generator. If the offset
is 2, then the first generator starts at frame 0, the second at frame
2, the third at frame 4, and so on.
Returns:
a multiline version fo the generator returned by frame_function
|
[
"Multiline",
"a",
"singlelined",
"frame",
"function",
".",
"Simply",
"chains",
"several",
"frame",
"generators",
"together",
"and",
"applies",
"the",
"specified",
"offset",
"to",
"each",
"one",
"."
] |
cb93d2e914c3ecc4e0007745ff4d546318cf3902
|
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/decorator.py#L108-L133
|
244,191
|
JIC-CSB/jicbioimage.segment
|
jicbioimage/segment/__init__.py
|
SegmentedImage.region_by_identifier
|
def region_by_identifier(self, identifier):
"""Return region of interest corresponding to the supplied identifier.
:param identifier: integer corresponding to the segment of interest
:returns: `jicbioimage.core.region.Region`
"""
if identifier < 0:
raise(ValueError("Identifier must be a positive integer."))
if not np.equal(np.mod(identifier, 1), 0):
raise(ValueError("Identifier must be a positive integer."))
if identifier == 0:
raise(ValueError("0 represents the background."))
return Region.select_from_array(self, identifier)
|
python
|
def region_by_identifier(self, identifier):
"""Return region of interest corresponding to the supplied identifier.
:param identifier: integer corresponding to the segment of interest
:returns: `jicbioimage.core.region.Region`
"""
if identifier < 0:
raise(ValueError("Identifier must be a positive integer."))
if not np.equal(np.mod(identifier, 1), 0):
raise(ValueError("Identifier must be a positive integer."))
if identifier == 0:
raise(ValueError("0 represents the background."))
return Region.select_from_array(self, identifier)
|
[
"def",
"region_by_identifier",
"(",
"self",
",",
"identifier",
")",
":",
"if",
"identifier",
"<",
"0",
":",
"raise",
"(",
"ValueError",
"(",
"\"Identifier must be a positive integer.\"",
")",
")",
"if",
"not",
"np",
".",
"equal",
"(",
"np",
".",
"mod",
"(",
"identifier",
",",
"1",
")",
",",
"0",
")",
":",
"raise",
"(",
"ValueError",
"(",
"\"Identifier must be a positive integer.\"",
")",
")",
"if",
"identifier",
"==",
"0",
":",
"raise",
"(",
"ValueError",
"(",
"\"0 represents the background.\"",
")",
")",
"return",
"Region",
".",
"select_from_array",
"(",
"self",
",",
"identifier",
")"
] |
Return region of interest corresponding to the supplied identifier.
:param identifier: integer corresponding to the segment of interest
:returns: `jicbioimage.core.region.Region`
|
[
"Return",
"region",
"of",
"interest",
"corresponding",
"to",
"the",
"supplied",
"identifier",
"."
] |
289e5ab834913326a097e57bea458ea0737efb0c
|
https://github.com/JIC-CSB/jicbioimage.segment/blob/289e5ab834913326a097e57bea458ea0737efb0c/jicbioimage/segment/__init__.py#L175-L191
|
244,192
|
JIC-CSB/jicbioimage.segment
|
jicbioimage/segment/__init__.py
|
SegmentedImage.merge_regions
|
def merge_regions(self, id1, id2):
"""Merge two regions into one.
The merged region will take on the id1 identifier.
:param id1: region 1 identifier
:param id2: region 2 identifier
"""
region2 = self.region_by_identifier(id2)
self[region2] = id1
|
python
|
def merge_regions(self, id1, id2):
"""Merge two regions into one.
The merged region will take on the id1 identifier.
:param id1: region 1 identifier
:param id2: region 2 identifier
"""
region2 = self.region_by_identifier(id2)
self[region2] = id1
|
[
"def",
"merge_regions",
"(",
"self",
",",
"id1",
",",
"id2",
")",
":",
"region2",
"=",
"self",
".",
"region_by_identifier",
"(",
"id2",
")",
"self",
"[",
"region2",
"]",
"=",
"id1"
] |
Merge two regions into one.
The merged region will take on the id1 identifier.
:param id1: region 1 identifier
:param id2: region 2 identifier
|
[
"Merge",
"two",
"regions",
"into",
"one",
"."
] |
289e5ab834913326a097e57bea458ea0737efb0c
|
https://github.com/JIC-CSB/jicbioimage.segment/blob/289e5ab834913326a097e57bea458ea0737efb0c/jicbioimage/segment/__init__.py#L236-L245
|
244,193
|
jspricke/python-icstask
|
icstask.py
|
task2ics
|
def task2ics():
"""Command line tool to convert from Taskwarrior to iCalendar"""
from argparse import ArgumentParser, FileType
from sys import stdout
parser = ArgumentParser(description='Converter from Taskwarrior to iCalendar syntax.')
parser.add_argument('indir', nargs='?', help='Input Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output iCalendar file (default: stdout)')
args = parser.parse_args()
task = IcsTask(args.indir)
args.outfile.write(task.to_vobject().serialize())
|
python
|
def task2ics():
"""Command line tool to convert from Taskwarrior to iCalendar"""
from argparse import ArgumentParser, FileType
from sys import stdout
parser = ArgumentParser(description='Converter from Taskwarrior to iCalendar syntax.')
parser.add_argument('indir', nargs='?', help='Input Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output iCalendar file (default: stdout)')
args = parser.parse_args()
task = IcsTask(args.indir)
args.outfile.write(task.to_vobject().serialize())
|
[
"def",
"task2ics",
"(",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
",",
"FileType",
"from",
"sys",
"import",
"stdout",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"'Converter from Taskwarrior to iCalendar syntax.'",
")",
"parser",
".",
"add_argument",
"(",
"'indir'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"'Input Taskwarrior directory (default to ~/.task)'",
",",
"default",
"=",
"expanduser",
"(",
"'~/.task'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'outfile'",
",",
"nargs",
"=",
"'?'",
",",
"type",
"=",
"FileType",
"(",
"'w'",
")",
",",
"default",
"=",
"stdout",
",",
"help",
"=",
"'Output iCalendar file (default: stdout)'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"task",
"=",
"IcsTask",
"(",
"args",
".",
"indir",
")",
"args",
".",
"outfile",
".",
"write",
"(",
"task",
".",
"to_vobject",
"(",
")",
".",
"serialize",
"(",
")",
")"
] |
Command line tool to convert from Taskwarrior to iCalendar
|
[
"Command",
"line",
"tool",
"to",
"convert",
"from",
"Taskwarrior",
"to",
"iCalendar"
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L338-L350
|
244,194
|
jspricke/python-icstask
|
icstask.py
|
ics2task
|
def ics2task():
"""Command line tool to convert from iCalendar to Taskwarrior"""
from argparse import ArgumentParser, FileType
from sys import stdin
parser = ArgumentParser(description='Converter from iCalendar to Taskwarrior syntax.')
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input iCalendar file (default: stdin)')
parser.add_argument('outdir', nargs='?', help='Output Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
args = parser.parse_args()
vobject = readOne(args.infile.read())
task = IcsTask(args.outdir)
for todo in vobject.vtodo_list:
task.to_task(todo)
|
python
|
def ics2task():
"""Command line tool to convert from iCalendar to Taskwarrior"""
from argparse import ArgumentParser, FileType
from sys import stdin
parser = ArgumentParser(description='Converter from iCalendar to Taskwarrior syntax.')
parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
help='Input iCalendar file (default: stdin)')
parser.add_argument('outdir', nargs='?', help='Output Taskwarrior directory (default to ~/.task)', default=expanduser('~/.task'))
args = parser.parse_args()
vobject = readOne(args.infile.read())
task = IcsTask(args.outdir)
for todo in vobject.vtodo_list:
task.to_task(todo)
|
[
"def",
"ics2task",
"(",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
",",
"FileType",
"from",
"sys",
"import",
"stdin",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"'Converter from iCalendar to Taskwarrior syntax.'",
")",
"parser",
".",
"add_argument",
"(",
"'infile'",
",",
"nargs",
"=",
"'?'",
",",
"type",
"=",
"FileType",
"(",
"'r'",
")",
",",
"default",
"=",
"stdin",
",",
"help",
"=",
"'Input iCalendar file (default: stdin)'",
")",
"parser",
".",
"add_argument",
"(",
"'outdir'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"'Output Taskwarrior directory (default to ~/.task)'",
",",
"default",
"=",
"expanduser",
"(",
"'~/.task'",
")",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"vobject",
"=",
"readOne",
"(",
"args",
".",
"infile",
".",
"read",
"(",
")",
")",
"task",
"=",
"IcsTask",
"(",
"args",
".",
"outdir",
")",
"for",
"todo",
"in",
"vobject",
".",
"vtodo_list",
":",
"task",
".",
"to_task",
"(",
"todo",
")"
] |
Command line tool to convert from iCalendar to Taskwarrior
|
[
"Command",
"line",
"tool",
"to",
"convert",
"from",
"iCalendar",
"to",
"Taskwarrior"
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L353-L367
|
244,195
|
jspricke/python-icstask
|
icstask.py
|
IcsTask._update
|
def _update(self):
"""Reload Taskwarrior files if the mtime is newer"""
update = False
with self._lock:
for fname in ['pending.data', 'completed.data']:
data_file = join(self._data_location, fname)
if exists(data_file):
mtime = getmtime(data_file)
if mtime > self._mtime:
self._mtime = mtime
update = True
if update:
self._tasks = {}
tasklist = loads(run(['task', 'rc.verbose=nothing', 'rc.hooks=off', 'rc.data.location={self._data_location}'.format(**locals()), 'export'], stdout=PIPE).stdout.decode('utf-8'))
for task in tasklist:
project = task['project'] if 'project' in task else 'unaffiliated'
if project not in self._tasks:
self._tasks[project] = {}
self._tasks[project][task['uuid']] = task
|
python
|
def _update(self):
"""Reload Taskwarrior files if the mtime is newer"""
update = False
with self._lock:
for fname in ['pending.data', 'completed.data']:
data_file = join(self._data_location, fname)
if exists(data_file):
mtime = getmtime(data_file)
if mtime > self._mtime:
self._mtime = mtime
update = True
if update:
self._tasks = {}
tasklist = loads(run(['task', 'rc.verbose=nothing', 'rc.hooks=off', 'rc.data.location={self._data_location}'.format(**locals()), 'export'], stdout=PIPE).stdout.decode('utf-8'))
for task in tasklist:
project = task['project'] if 'project' in task else 'unaffiliated'
if project not in self._tasks:
self._tasks[project] = {}
self._tasks[project][task['uuid']] = task
|
[
"def",
"_update",
"(",
"self",
")",
":",
"update",
"=",
"False",
"with",
"self",
".",
"_lock",
":",
"for",
"fname",
"in",
"[",
"'pending.data'",
",",
"'completed.data'",
"]",
":",
"data_file",
"=",
"join",
"(",
"self",
".",
"_data_location",
",",
"fname",
")",
"if",
"exists",
"(",
"data_file",
")",
":",
"mtime",
"=",
"getmtime",
"(",
"data_file",
")",
"if",
"mtime",
">",
"self",
".",
"_mtime",
":",
"self",
".",
"_mtime",
"=",
"mtime",
"update",
"=",
"True",
"if",
"update",
":",
"self",
".",
"_tasks",
"=",
"{",
"}",
"tasklist",
"=",
"loads",
"(",
"run",
"(",
"[",
"'task'",
",",
"'rc.verbose=nothing'",
",",
"'rc.hooks=off'",
",",
"'rc.data.location={self._data_location}'",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"'export'",
"]",
",",
"stdout",
"=",
"PIPE",
")",
".",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"for",
"task",
"in",
"tasklist",
":",
"project",
"=",
"task",
"[",
"'project'",
"]",
"if",
"'project'",
"in",
"task",
"else",
"'unaffiliated'",
"if",
"project",
"not",
"in",
"self",
".",
"_tasks",
":",
"self",
".",
"_tasks",
"[",
"project",
"]",
"=",
"{",
"}",
"self",
".",
"_tasks",
"[",
"project",
"]",
"[",
"task",
"[",
"'uuid'",
"]",
"]",
"=",
"task"
] |
Reload Taskwarrior files if the mtime is newer
|
[
"Reload",
"Taskwarrior",
"files",
"if",
"the",
"mtime",
"is",
"newer"
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L47-L67
|
244,196
|
jspricke/python-icstask
|
icstask.py
|
IcsTask.to_vobjects
|
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Taskwarrior entries in uids
filename -- the Taskwarrior project
uids -- the UIDs of the Taskwarrior tasks (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
project = basename(filename)
items = []
for uid in uids:
vtodos = iCalendar()
uuid = uid.split('@')[0]
self._gen_vtodo(self._tasks[project][uuid], vtodos.add('vtodo'))
items.append((uid, vtodos, '"%s"' % self._tasks[project][uuid]['modified']))
return items
|
python
|
def to_vobjects(self, filename, uids=None):
"""Return iCal objects and etags of all Taskwarrior entries in uids
filename -- the Taskwarrior project
uids -- the UIDs of the Taskwarrior tasks (all if None)
"""
self._update()
if not uids:
uids = self.get_uids(filename)
project = basename(filename)
items = []
for uid in uids:
vtodos = iCalendar()
uuid = uid.split('@')[0]
self._gen_vtodo(self._tasks[project][uuid], vtodos.add('vtodo'))
items.append((uid, vtodos, '"%s"' % self._tasks[project][uuid]['modified']))
return items
|
[
"def",
"to_vobjects",
"(",
"self",
",",
"filename",
",",
"uids",
"=",
"None",
")",
":",
"self",
".",
"_update",
"(",
")",
"if",
"not",
"uids",
":",
"uids",
"=",
"self",
".",
"get_uids",
"(",
"filename",
")",
"project",
"=",
"basename",
"(",
"filename",
")",
"items",
"=",
"[",
"]",
"for",
"uid",
"in",
"uids",
":",
"vtodos",
"=",
"iCalendar",
"(",
")",
"uuid",
"=",
"uid",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"self",
".",
"_gen_vtodo",
"(",
"self",
".",
"_tasks",
"[",
"project",
"]",
"[",
"uuid",
"]",
",",
"vtodos",
".",
"add",
"(",
"'vtodo'",
")",
")",
"items",
".",
"append",
"(",
"(",
"uid",
",",
"vtodos",
",",
"'\"%s\"'",
"%",
"self",
".",
"_tasks",
"[",
"project",
"]",
"[",
"uuid",
"]",
"[",
"'modified'",
"]",
")",
")",
"return",
"items"
] |
Return iCal objects and etags of all Taskwarrior entries in uids
filename -- the Taskwarrior project
uids -- the UIDs of the Taskwarrior tasks (all if None)
|
[
"Return",
"iCal",
"objects",
"and",
"etags",
"of",
"all",
"Taskwarrior",
"entries",
"in",
"uids"
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L89-L108
|
244,197
|
jspricke/python-icstask
|
icstask.py
|
IcsTask.to_vobject
|
def to_vobject(self, project=None, uid=None):
"""Return vObject object of Taskwarrior tasks
If filename and UID are specified, the vObject only contains that task.
If only a filename is specified, the vObject contains all events in the project.
Otherwise the vObject contains all all objects of all files associated with the IcsTask object.
project -- the Taskwarrior project
uid -- the UID of the task
"""
self._update()
vtodos = iCalendar()
if uid:
uid = uid.split('@')[0]
if not project:
for p in self._tasks:
if uid in self._tasks[p]:
project = p
break
self._gen_vtodo(self._tasks[basename(project)][uid], vtodos.add('vtodo'))
elif project:
for task in self._tasks[basename(project)].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
else:
for project in self._tasks:
for task in self._tasks[project].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
return vtodos
|
python
|
def to_vobject(self, project=None, uid=None):
"""Return vObject object of Taskwarrior tasks
If filename and UID are specified, the vObject only contains that task.
If only a filename is specified, the vObject contains all events in the project.
Otherwise the vObject contains all all objects of all files associated with the IcsTask object.
project -- the Taskwarrior project
uid -- the UID of the task
"""
self._update()
vtodos = iCalendar()
if uid:
uid = uid.split('@')[0]
if not project:
for p in self._tasks:
if uid in self._tasks[p]:
project = p
break
self._gen_vtodo(self._tasks[basename(project)][uid], vtodos.add('vtodo'))
elif project:
for task in self._tasks[basename(project)].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
else:
for project in self._tasks:
for task in self._tasks[project].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
return vtodos
|
[
"def",
"to_vobject",
"(",
"self",
",",
"project",
"=",
"None",
",",
"uid",
"=",
"None",
")",
":",
"self",
".",
"_update",
"(",
")",
"vtodos",
"=",
"iCalendar",
"(",
")",
"if",
"uid",
":",
"uid",
"=",
"uid",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"if",
"not",
"project",
":",
"for",
"p",
"in",
"self",
".",
"_tasks",
":",
"if",
"uid",
"in",
"self",
".",
"_tasks",
"[",
"p",
"]",
":",
"project",
"=",
"p",
"break",
"self",
".",
"_gen_vtodo",
"(",
"self",
".",
"_tasks",
"[",
"basename",
"(",
"project",
")",
"]",
"[",
"uid",
"]",
",",
"vtodos",
".",
"add",
"(",
"'vtodo'",
")",
")",
"elif",
"project",
":",
"for",
"task",
"in",
"self",
".",
"_tasks",
"[",
"basename",
"(",
"project",
")",
"]",
".",
"values",
"(",
")",
":",
"self",
".",
"_gen_vtodo",
"(",
"task",
",",
"vtodos",
".",
"add",
"(",
"'vtodo'",
")",
")",
"else",
":",
"for",
"project",
"in",
"self",
".",
"_tasks",
":",
"for",
"task",
"in",
"self",
".",
"_tasks",
"[",
"project",
"]",
".",
"values",
"(",
")",
":",
"self",
".",
"_gen_vtodo",
"(",
"task",
",",
"vtodos",
".",
"add",
"(",
"'vtodo'",
")",
")",
"return",
"vtodos"
] |
Return vObject object of Taskwarrior tasks
If filename and UID are specified, the vObject only contains that task.
If only a filename is specified, the vObject contains all events in the project.
Otherwise the vObject contains all all objects of all files associated with the IcsTask object.
project -- the Taskwarrior project
uid -- the UID of the task
|
[
"Return",
"vObject",
"object",
"of",
"Taskwarrior",
"tasks",
"If",
"filename",
"and",
"UID",
"are",
"specified",
"the",
"vObject",
"only",
"contains",
"that",
"task",
".",
"If",
"only",
"a",
"filename",
"is",
"specified",
"the",
"vObject",
"contains",
"all",
"events",
"in",
"the",
"project",
".",
"Otherwise",
"the",
"vObject",
"contains",
"all",
"all",
"objects",
"of",
"all",
"files",
"associated",
"with",
"the",
"IcsTask",
"object",
"."
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L110-L138
|
244,198
|
jspricke/python-icstask
|
icstask.py
|
IcsTask.get_filesnames
|
def get_filesnames(self):
"""Return a list of all Taskwarrior projects as virtual files in the data directory"""
self._update()
projects = set(list(self._tasks.keys()) + self._task_projects + ['all_projects', 'unaffiliated'])
return [join(self._data_location, p.split()[0]) for p in projects]
|
python
|
def get_filesnames(self):
"""Return a list of all Taskwarrior projects as virtual files in the data directory"""
self._update()
projects = set(list(self._tasks.keys()) + self._task_projects + ['all_projects', 'unaffiliated'])
return [join(self._data_location, p.split()[0]) for p in projects]
|
[
"def",
"get_filesnames",
"(",
"self",
")",
":",
"self",
".",
"_update",
"(",
")",
"projects",
"=",
"set",
"(",
"list",
"(",
"self",
".",
"_tasks",
".",
"keys",
"(",
")",
")",
"+",
"self",
".",
"_task_projects",
"+",
"[",
"'all_projects'",
",",
"'unaffiliated'",
"]",
")",
"return",
"[",
"join",
"(",
"self",
".",
"_data_location",
",",
"p",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"for",
"p",
"in",
"projects",
"]"
] |
Return a list of all Taskwarrior projects as virtual files in the data directory
|
[
"Return",
"a",
"list",
"of",
"all",
"Taskwarrior",
"projects",
"as",
"virtual",
"files",
"in",
"the",
"data",
"directory"
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L269-L273
|
244,199
|
jspricke/python-icstask
|
icstask.py
|
IcsTask.move_vobject
|
def move_vobject(self, uuid, from_project, to_project):
"""Update the project of the task with the UID uuid"""
if to_project not in self.get_filesnames():
return
uuid = uuid.split('@')[0]
with self._lock:
run(['task', 'rc.verbose=nothing', 'rc.data.location={self._data_location}'.format(**locals()), 'rc.confirmation=no', uuid, 'modify', 'project:{}'.format(basename(to_project))])
|
python
|
def move_vobject(self, uuid, from_project, to_project):
"""Update the project of the task with the UID uuid"""
if to_project not in self.get_filesnames():
return
uuid = uuid.split('@')[0]
with self._lock:
run(['task', 'rc.verbose=nothing', 'rc.data.location={self._data_location}'.format(**locals()), 'rc.confirmation=no', uuid, 'modify', 'project:{}'.format(basename(to_project))])
|
[
"def",
"move_vobject",
"(",
"self",
",",
"uuid",
",",
"from_project",
",",
"to_project",
")",
":",
"if",
"to_project",
"not",
"in",
"self",
".",
"get_filesnames",
"(",
")",
":",
"return",
"uuid",
"=",
"uuid",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"with",
"self",
".",
"_lock",
":",
"run",
"(",
"[",
"'task'",
",",
"'rc.verbose=nothing'",
",",
"'rc.data.location={self._data_location}'",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"'rc.confirmation=no'",
",",
"uuid",
",",
"'modify'",
",",
"'project:{}'",
".",
"format",
"(",
"basename",
"(",
"to_project",
")",
")",
"]",
")"
] |
Update the project of the task with the UID uuid
|
[
"Update",
"the",
"project",
"of",
"the",
"task",
"with",
"the",
"UID",
"uuid"
] |
0802233cca569c2174bd96aed0682d04a2a63790
|
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L328-L335
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.