text stringlengths 4 1.02M | meta dict |
|---|---|
"""Generic presubmit checks that can be reused by other presubmit checks."""
import os as _os
_HERE = _os.path.dirname(_os.path.abspath(__file__))
# Justifications for each filter:
#
# - build/include : Too many; fix in the future.
# - build/include_order : Not happening; #ifdefed includes.
# - build/namespace : I'm surprised by how often we violate this rule.
# - readability/casting : Mistakes a whole bunch of function pointer.
# - runtime/int : Can be fixed long term; volume of errors too high
# - runtime/virtual : Broken now, but can be fixed in the future?
# - whitespace/braces : We have a lot of explicit scoping in chrome code.
# - readability/inheritance : Temporary, while the OVERRIDE and FINAL fixup
# is in progress.
#
# Default category filters handed to cpplint; each '-' entry disables one
# check category.  Used by CheckChangeLintsClean when the caller passes no
# explicit lint_filters.
DEFAULT_LINT_FILTERS = [
  '-build/include',
  '-build/include_order',
  '-build/namespace',
  '-readability/casting',
  '-runtime/int',
  '-runtime/virtual',
  '-whitespace/braces',
  '-readability/inheritance'
]
### Description checks
def CheckChangeHasTestField(input_api, output_api):
  """Requires that the changelist have a TEST= field."""
  if not input_api.change.TEST:
    # Missing TEST= only produces a notification, never a hard failure.
    return [output_api.PresubmitNotifyResult(
        'If this change requires manual test instructions to QA team, add '
        'TEST=[instructions].')]
  return []
def CheckChangeHasBugField(input_api, output_api):
  """Requires that the changelist have a BUG= field."""
  if not input_api.change.BUG:
    # Missing BUG= only produces a notification, never a hard failure.
    return [output_api.PresubmitNotifyResult(
        'If this change has an associated bug, add BUG=[bug number].')]
  return []
def CheckChangeHasTestedField(input_api, output_api):
  """Requires that the changelist have a TESTED= field."""
  # Unlike TEST=, the absence of TESTED= is a hard error.
  missing = not input_api.change.TESTED
  if missing:
    return [output_api.PresubmitError('Changelist must have a TESTED= field.')]
  return []
def CheckChangeHasQaField(input_api, output_api):
  """Requires that the changelist have a QA= field."""
  # Absence of QA= is a hard error, mirroring CheckChangeHasTestedField.
  if input_api.change.QA:
    return []
  return [output_api.PresubmitError('Changelist must have a QA= field.')]
def CheckDoNotSubmitInDescription(input_api, output_api):
  """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description.
  """
  # The literal is split so this file does not itself trip the check.
  keyword = 'DO NOT ' + 'SUBMIT'
  if keyword not in input_api.change.DescriptionText():
    return []
  return [output_api.PresubmitError(
      keyword + ' is present in the changelist description.')]
def CheckChangeHasDescription(input_api, output_api):
  """Checks the CL description is not empty."""
  if input_api.change.DescriptionText().strip():
    return []
  # An empty description blocks a commit but only warns during upload.
  if input_api.is_committing:
    return [output_api.PresubmitError('Add a description to the CL.')]
  return [output_api.PresubmitNotifyResult('Add a description to the CL.')]
def CheckChangeWasUploaded(input_api, output_api):
  """Checks that the issue was uploaded before committing."""
  # Only relevant at commit time; an upload obviously has no issue yet.
  needs_upload = input_api.is_committing and not input_api.change.issue
  if needs_upload:
    return [output_api.PresubmitError(
        'Issue wasn\'t uploaded. Please upload first.')]
  return []
### Content checks
def CheckDoNotSubmitInFiles(input_api, output_api):
  """Checks that the user didn't add 'DO NOT ''SUBMIT' to any files."""
  # The identity filter means every text file is checked, not just
  # recognized source files.  The literal is split so this file does not
  # itself trip the check.
  keyword = 'DO NOT ' + 'SUBMIT'
  errors = _FindNewViolationsOfRule(
      lambda _, line: keyword not in line, input_api, lambda x: x)
  text = '\n'.join('Found %s in %s' % (keyword, loc) for loc in errors)
  if not text:
    return []
  return [output_api.PresubmitError(text)]
def CheckChangeLintsClean(input_api, output_api, source_file_filter=None,
                          lint_filters=None, verbose_level=None):
  """Checks that all '.cc' and '.h' files pass cpplint.py."""
  _RE_IS_TEST = input_api.re.compile(r'.*tests?.(cc|h)$')
  result = []
  cpplint = input_api.cpplint
  # Access to a protected member _XX of a client class
  # pylint: disable=W0212
  # cpplint keeps error counts in module-global state; reset before use.
  cpplint._cpplint_state.ResetErrorCounts()
  lint_filters = lint_filters or DEFAULT_LINT_FILTERS
  cpplint._SetFilters(','.join(lint_filters))
  # We currently are more strict with normal code than unit tests; 4 and 5 are
  # the verbosity level that would normally be passed to cpplint.py through
  # --verbose=#. Hopefully, in the future, we can be more verbose.
  files = [f.AbsoluteLocalPath() for f in
           input_api.AffectedSourceFiles(source_file_filter)]
  for file_name in files:
    if _RE_IS_TEST.match(file_name):
      level = 5
    else:
      level = 4
    # NOTE(review): once set (by the caller, or by the first file processed)
    # verbose_level is reused for every later file, so the per-file
    # test/non-test level only affects the first iteration — confirm whether
    # this stickiness is intentional.
    verbose_level = verbose_level or level
    cpplint.ProcessFile(file_name, verbose_level)
  # A nonzero error count from any file fails the whole change.
  if cpplint._cpplint_state.error_count > 0:
    if input_api.is_committing:
      res_type = output_api.PresubmitError
    else:
      res_type = output_api.PresubmitPromptWarning
    result = [res_type('Changelist failed cpplint.py check.')]
  return result
def CheckChangeHasNoCR(input_api, output_api, source_file_filter=None):
  """Checks no '\r' (CR) character is in any source files."""
  offenders = [
      f.LocalPath()
      for f in input_api.AffectedSourceFiles(source_file_filter)
      if '\r' in input_api.ReadFile(f, 'rb')
  ]
  if offenders:
    return [output_api.PresubmitPromptWarning(
        'Found a CR character in these files:', items=offenders)]
  return []
def CheckSvnModifiedDirectories(input_api, output_api, source_file_filter=None):
  """Checks for files in svn modified directories.

  They will get submitted on accident because svn commits recursively by
  default, and that's very dangerous.

  Args:
    input_api: bag of input-related APIs (change, os_path, AffectedFiles).
    output_api: bag of result factories.
    source_file_filter: optional filter passed to AffectedFiles.

  Returns:
    A list of warning/notify results, one per modified directory in the CL
    that contains files modified in other changelists.
  """
  if input_api.change.scm != 'svn':
    return []

  errors = []
  current_cl_files = input_api.change.GetModifiedFiles()
  all_modified_files = input_api.change.GetAllModifiedFiles()
  # Filter out files in the current CL.
  modified_files = [f for f in all_modified_files if f not in current_cl_files]
  modified_abspaths = [input_api.os_path.abspath(f) for f in modified_files]

  for f in input_api.AffectedFiles(file_filter=source_file_filter):
    if f.Action() == 'M' and f.IsDirectory():
      curpath = f.AbsoluteLocalPath()
      # Collect modified files from other CLs that live under curpath.
      # zip() replaces the old index-based xrange loop: xrange no longer
      # exists on Python 3, and zip is equivalent here on both versions.
      bad_files = [
          rel for rel, abspath in zip(modified_files, modified_abspaths)
          if input_api.os_path.commonprefix([curpath, abspath]) == curpath
      ]
      if bad_files:
        if input_api.is_committing:
          error_type = output_api.PresubmitPromptWarning
        else:
          error_type = output_api.PresubmitNotifyResult
        errors.append(error_type(
            'Potential accidental commits in changelist %s:' % f.LocalPath(),
            items=bad_files))
  return errors
def CheckChangeHasOnlyOneEol(input_api, output_api, source_file_filter=None):
  """Checks the files ends with one and only one \n (LF)."""
  eof_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(f, 'rb')
    # A well-formed file ends with exactly one '\n': the final byte is a
    # newline and the byte before it is not.
    ends_once = contents[-1:] == '\n' and contents[-2:-1] != '\n'
    if len(contents) > 1 and not ends_once:
      eof_files.append(f.LocalPath())
  if eof_files:
    return [output_api.PresubmitPromptWarning(
        'These files should end in one (and only one) newline character:',
        items=eof_files)]
  return []
def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api,
                                       source_file_filter=None):
  """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass.

  It is faster because it is reading the file only once.
  """
  cr_files = []
  eof_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(f, 'rb')
    path = f.LocalPath()
    if '\r' in contents:
      cr_files.append(path)
    # The file must end in exactly one newline character.
    if len(contents) > 1 and (contents[-1:] != '\n' or
                              contents[-2:-1] == '\n'):
      eof_files.append(path)
  outputs = []
  if cr_files:
    outputs.append(output_api.PresubmitPromptWarning(
        'Found a CR character in these files:', items=cr_files))
  if eof_files:
    outputs.append(output_api.PresubmitPromptWarning(
        'These files should end in one (and only one) newline character:',
        items=eof_files))
  return outputs
def _ReportErrorFileAndLine(filename, line_num, dummy_line):
  """Default error formatter for _FindNewViolationsOfRule."""
  # The offending line text is deliberately ignored.
  return '{}:{}'.format(filename, line_num)
def _FindNewViolationsOfRule(callable_rule, input_api, source_file_filter=None,
                             error_formatter=_ReportErrorFileAndLine):
  """Find all newly introduced violations of a per-line rule (a callable).

  Arguments:
    callable_rule: a callable taking a file extension and line of input and
      returning True if the rule is satisfied and False if there was a problem.
    input_api: object to enumerate the affected files.
    source_file_filter: a filter to be passed to the input api.
    error_formatter: a callable taking (filename, line_number, line) and
      returning a formatted error string.

  Returns:
    A list of the newly-introduced violations reported by the rule.
  """
  errors = []
  affected = input_api.AffectedFiles(include_deletes=False,
                                     file_filter=source_file_filter)
  for f in affected:
    # For speed, scan the full file first; only when some violation exists
    # anywhere do we ask the SCM for the changed region, which can be quite
    # expensive on Win32.  Assuming most files are kept problem-free, the
    # SCM operation is skipped most of the time.
    extension = str(f.LocalPath()).rsplit('.', 1)[-1]
    clean = all(callable_rule(extension, line) for line in f.NewContents())
    if clean:
      continue
    errors.extend(
        error_formatter(f.LocalPath(), line_num, line)
        for line_num, line in f.ChangedContents()
        if not callable_rule(extension, line))
  return errors
def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None):
  """Checks that there are no tab characters in any of the text files to be
  submitted.
  """
  # Makefiles require leading tabs, so they are excluded on top of whatever
  # filter the caller supplied.
  if not source_file_filter:
    # It's the default filter.
    source_file_filter = input_api.FilterSourceFile

  def filter_more(affected_file):
    basename = input_api.os_path.basename(affected_file.LocalPath())
    is_makefile = (basename in ('Makefile', 'makefile') or
                   basename.endswith('.mk'))
    return not is_makefile and source_file_filter(affected_file)

  tabs = _FindNewViolationsOfRule(lambda _, line: '\t' not in line,
                                  input_api, filter_more)
  if not tabs:
    return []
  return [output_api.PresubmitPromptWarning('Found a tab character in:',
                                            long_text='\n'.join(tabs))]
def CheckChangeTodoHasOwner(input_api, output_api, source_file_filter=None):
  """Checks that the user didn't add TODO(name) without an owner."""
  # The literal is split so this file does not itself trip the check.
  unowned_todo = input_api.re.compile('TO' 'DO[^(]')
  raw = _FindNewViolationsOfRule(lambda _, x: not unowned_todo.search(x),
                                 input_api, source_file_filter)
  errors = ['Found TO' 'DO with no owner in ' + loc for loc in raw]
  if errors:
    return [output_api.PresubmitPromptWarning('\n'.join(errors))]
  return []
def CheckChangeHasNoStrayWhitespace(input_api, output_api,
                                    source_file_filter=None):
  """Checks that there is no stray whitespace at source lines end."""
  errors = _FindNewViolationsOfRule(
      lambda _, line: line == line.rstrip(), input_api, source_file_filter)
  if not errors:
    return []
  return [output_api.PresubmitPromptWarning(
      'Found line ending with white spaces in:',
      long_text='\n'.join(errors))]
def CheckLongLines(input_api, output_api, maxlen, source_file_filter=None):
  """Checks that there aren't any lines longer than maxlen characters in any of
  the text files to be submitted.

  Args:
    input_api: bag of input-related APIs.
    output_api: bag of result factories.
    maxlen: default maximum line length for extensions not special-cased.
    source_file_filter: optional filter passed to the rule finder.

  Returns:
    A list with one prompt-warning result (showing the first 5 violations),
    or an empty list when every changed line fits.
  """
  maxlens = {
      'java': 100,
      # This is specifically for Android's handwritten makefiles (Android.mk).
      'mk': 200,
      '': maxlen,
  }
  # Language specific exceptions to max line length.
  # '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a
  # superset of CPP_EXCEPTIONS.
  CPP_FILE_EXTS = ('c', 'cc')
  CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma')
  JAVA_FILE_EXTS = ('java',)
  JAVA_EXCEPTIONS = ('import ', 'package ')
  OBJC_FILE_EXTS = ('h', 'm', 'mm')
  OBJC_EXCEPTIONS = ('#define', '#endif', '#if', '#import', '#include',
                     '#pragma')
  LANGUAGE_EXCEPTIONS = [
      (CPP_FILE_EXTS, CPP_EXCEPTIONS),
      (JAVA_FILE_EXTS, JAVA_EXCEPTIONS),
      (OBJC_FILE_EXTS, OBJC_EXCEPTIONS),
  ]

  def no_long_lines(file_extension, line):
    # Check for language specific exceptions.
    if any(file_extension in exts and line.startswith(exceptions)
           for exts, exceptions in LANGUAGE_EXCEPTIONS):
      return True

    file_maxlen = maxlens.get(file_extension, maxlens[''])
    # Stupidly long symbols that needs to be worked around if takes 66% of
    # line.  Floor division keeps the thresholds integers on Python 3, where
    # plain '/' would produce floats (the value is interpolated via %d).
    long_symbol = file_maxlen * 2 // 3
    # Hard line length limit at 50% more.
    extra_maxlen = file_maxlen * 3 // 2

    line_len = len(line)
    if line_len <= file_maxlen:
      return True

    # Allow long URLs of any length.
    if any((url in line) for url in ('file://', 'http://', 'https://')):
      return True

    if line_len > extra_maxlen:
      return False

    if 'url(' in line and file_extension == 'css':
      return True

    if '<include' in line and file_extension in ('css', 'html', 'js'):
      return True

    # Tolerate moderately long lines containing one huge unbreakable symbol.
    return input_api.re.match(
        r'.*[A-Za-z][A-Za-z_0-9]{%d,}.*' % long_symbol, line)

  def format_error(filename, line_num, line):
    return '%s, line %s, %s chars' % (filename, line_num, len(line))

  errors = _FindNewViolationsOfRule(no_long_lines, input_api,
                                    source_file_filter,
                                    error_formatter=format_error)
  if errors:
    msg = 'Found lines longer than %s characters (first 5 shown).' % maxlen
    return [output_api.PresubmitPromptWarning(msg, items=errors[:5])]
  else:
    return []
def CheckLicense(input_api, output_api, license_re, source_file_filter=None,
                 accept_empty_files=True):
  """Verifies the license header.
  """
  license_re = input_api.re.compile(license_re, input_api.re.MULTILINE)
  bad_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(f, 'rb')
    if accept_empty_files and not contents:
      continue
    if not license_re.search(contents):
      bad_files.append(f.LocalPath())
  if not bad_files:
    return []
  # A bad header is a warning at commit time and only a notification during
  # upload.
  res_type = (output_api.PresubmitPromptWarning if input_api.is_committing
              else output_api.PresubmitNotifyResult)
  return [res_type(
      'License must match:\n%s\n' % license_re.pattern +
      'Found a bad license header in these files:', items=bad_files)]
def CheckChangeSvnEolStyle(input_api, output_api, source_file_filter=None):
  """Checks that the source files have svn:eol-style=LF."""
  affected = input_api.AffectedSourceFiles(source_file_filter)
  return CheckSvnProperty(input_api, output_api, 'svn:eol-style', 'LF',
                          affected)
def CheckSvnForCommonMimeTypes(input_api, output_api):
  """Checks that common binary file types have the correct svn:mime-type."""
  output = []
  files = input_api.AffectedFiles(include_deletes=False)

  def files_with_exts(exts):
    # Keep every affected file whose path ends with one of the extensions.
    return [f for f in files
            if any(f.LocalPath().endswith(ext) for ext in exts)]

  def run_check(mime_type, matching):
    output.extend(CheckSvnProperty(input_api, output_api, 'svn:mime-type',
                                   mime_type, matching))

  run_check('application/pdf', files_with_exts(['.pdf']))
  run_check('image/bmp', files_with_exts(['.bmp']))
  run_check('image/gif', files_with_exts(['.gif']))
  run_check('image/png', files_with_exts(['.png']))
  run_check('image/jpeg', files_with_exts(['.jpg', '.jpeg', '.jpe']))
  run_check('image/vnd.microsoft.icon', files_with_exts(['.ico']))
  return output
def CheckSvnProperty(input_api, output_api, prop, expected, affected_files):
  """Checks that affected_files files have prop=expected.

  Args:
    input_api: bag of input-related APIs.
    output_api: bag of result factories.
    prop: svn property name, e.g. 'svn:eol-style'.
    expected: expected property value.
    affected_files: iterable of affected-file objects to inspect.

  Returns:
    A list with one result naming the offending files, or [] when every file
    has the expected property value (or the change is not svn-based).
  """
  if input_api.change.scm != 'svn':
    return []
  # Materialize into a list: on Python 3 a lazy filter() object is always
  # truthy, which would make the 'if bad:' test below succeed even when no
  # file violates the property.
  bad = [f for f in affected_files if f.Property(prop) != expected]
  if bad:
    if input_api.is_committing:
      res_type = output_api.PresubmitError
    else:
      res_type = output_api.PresubmitNotifyResult
    message = 'Run the command: svn pset %s %s \\' % (prop, expected)
    return [res_type(message, items=bad)]
  return []
### Other checks
def CheckDoNotSubmit(input_api, output_api):
  """Runs CheckDoNotSubmitInDescription and CheckDoNotSubmitInFiles."""
  results = CheckDoNotSubmitInDescription(input_api, output_api)
  results += CheckDoNotSubmitInFiles(input_api, output_api)
  return results
def CheckTreeIsOpen(input_api, output_api,
                    url=None, closed=None, json_url=None):
  """Check whether to allow commit without prompt.

  Supports two styles:
  1. Checks that an url's content doesn't match a regexp that would mean that
     the tree is closed. (old)
  2. Check the json_url to decide whether to allow commit without prompt.

  Args:
    input_api: input related apis.
    output_api: output related apis.
    url: url to use for regex based tree status.
    closed: regex to match for closed status.
    json_url: url to download json style status.
  """
  # The tree status only gates commits, never uploads.
  if not input_api.is_committing:
    return []

  def fetch(target):
    # Read the whole status document from |target| and close the connection.
    connection = input_api.urllib2.urlopen(target)
    data = connection.read()
    connection.close()
    return data

  try:
    if json_url:
      status = input_api.json.loads(fetch(json_url))
      if not status['can_commit_freely']:
        short_text = 'Tree state is: ' + status['general_state']
        long_text = status['message'] + '\n' + json_url
        return [output_api.PresubmitError(short_text, long_text=long_text)]
    else:
      # TODO(bradnelson): drop this once all users are gone.
      status = fetch(url)
      if input_api.re.match(closed, status):
        return [output_api.PresubmitError('The tree is closed.',
                                          long_text=status + '\n' + url)]
  except IOError as e:
    return [output_api.PresubmitError('Error fetching tree status.',
                                      long_text=str(e))]
  return []
def GetUnitTestsInDirectory(
    input_api, output_api, directory, whitelist=None, blacklist=None, env=None):
  """Lists all files in a directory and runs them. Doesn't recurse.

  It's mainly a wrapper for RunUnitTests. Use whitelist and blacklist to filter
  tests accordingly.
  """
  test_path = input_api.os_path.abspath(
      input_api.os_path.join(input_api.PresubmitLocalPath(), directory))

  def matches_any(filename, patterns):
    return any(True for p in patterns if input_api.re.match(p, filename))

  unit_tests = []
  found = 0
  for filename in input_api.os_listdir(test_path):
    # 'found' counts every directory entry, including non-files.
    found += 1
    fullpath = input_api.os_path.join(test_path, filename)
    if not input_api.os_path.isfile(fullpath):
      continue
    if whitelist and not matches_any(filename, whitelist):
      continue
    if blacklist and matches_any(filename, blacklist):
      continue
    unit_tests.append(input_api.os_path.join(directory, filename))
  to_run = len(unit_tests)
  input_api.logging.debug('Found %d files, running %d' % (found, to_run))
  if not to_run:
    return [
        output_api.PresubmitPromptWarning(
            'Out of %d files, found none that matched w=%r, b=%r in directory %s'
            % (found, whitelist, blacklist, directory))
    ]
  return GetUnitTests(input_api, output_api, unit_tests, env)
def GetUnitTests(input_api, output_api, unit_tests, env=None):
  """Runs all unit tests in a directory.

  On Windows, sys.executable is used for unit tests ending with ".py".
  """
  # We don't want to hinder users from uploading incomplete patches.
  if input_api.is_committing:
    message_type = output_api.PresubmitError
  else:
    message_type = output_api.PresubmitPromptWarning

  results = []
  for unit_test in unit_tests:
    cmd = []
    # Windows needs some help launching python scripts.
    if input_api.platform == 'win32' and unit_test.endswith('.py'):
      cmd = [input_api.python_executable]
    cmd.append(unit_test)
    if input_api.verbose:
      cmd.append('--verbose')
    kwargs = {'cwd': input_api.PresubmitLocalPath()}
    if env:
      kwargs['env'] = env
    results.append(input_api.Command(
        name=unit_test, cmd=cmd, kwargs=kwargs, message=message_type))
  return results
def GetUnitTestsRecursively(input_api, output_api, directory,
                            whitelist, blacklist):
  """Gets all files in the directory tree (git repo) that match the whitelist.

  Restricts itself to only find files within the Change's source repo, not
  dependencies.
  """
  def wanted(filename):
    if not any(input_api.re.match(p, filename) for p in whitelist):
      return False
    return not any(input_api.re.match(p, filename) for p in blacklist)

  all_files = input_api.change.AllFiles(directory)
  tests = [filepath for filepath in all_files if wanted(filepath)]
  found = len(all_files)
  to_run = len(tests)
  input_api.logging.debug('Found %d files, running %d' % (found, to_run))
  if not to_run:
    return [
        output_api.PresubmitPromptWarning(
            'Out of %d files, found none that matched w=%r, b=%r in directory %s'
            % (found, whitelist, blacklist, directory))
    ]
  return GetUnitTests(input_api, output_api, tests)
def GetPythonUnitTests(input_api, output_api, unit_tests):
  """Run the unit tests out of process, capture the output and use the result
  code to determine success.

  DEPRECATED.
  """
  # We don't want to hinder users from uploading incomplete patches.
  if input_api.is_committing:
    message_type = output_api.PresubmitError
  else:
    message_type = output_api.PresubmitNotifyResult
  results = []
  for unit_test in unit_tests:
    # Run the unit tests out of process. This is because some unit tests
    # stub out base libraries and don't clean up their mess. It's too easy to
    # get subtle bugs.
    cwd = None
    env = None
    unit_test_name = unit_test
    # 'python -m test.unit_test' doesn't work. We need to change to the right
    # directory instead.
    if '.' in unit_test:
      # Tests imported in submodules (subdirectories) assume that the current
      # directory is in the PYTHONPATH. Manually fix that.
      unit_test = unit_test.replace('.', '/')
      cwd = input_api.os_path.dirname(unit_test)
      unit_test = input_api.os_path.basename(unit_test)
      env = input_api.environ.copy()
      # At least on Windows, it seems '.' must explicitly be in PYTHONPATH
      # Build one '..' segment per path component of cwd so imports resolve
      # from the repository root as well as from the test's own directory.
      backpath = [
          '.', input_api.os_path.pathsep.join(['..'] * (cwd.count('/') + 1))
      ]
      if env.get('PYTHONPATH'):
        backpath.append(env.get('PYTHONPATH'))
      env['PYTHONPATH'] = input_api.os_path.pathsep.join((backpath))
    # '-m' keeps package-relative imports inside the test working.
    cmd = [input_api.python_executable, '-m', '%s' % unit_test]
    results.append(input_api.Command(
        name=unit_test_name,
        cmd=cmd,
        kwargs={'env': env, 'cwd': cwd},
        message=message_type))
  return results
def RunUnitTestsInDirectory(input_api, *args, **kwargs):
  """Run tests in a directory serially.

  For better performance, use GetUnitTestsInDirectory and then
  pass to input_api.RunTests.
  """
  tests = GetUnitTestsInDirectory(input_api, *args, **kwargs)
  return input_api.RunTests(tests, False)
def RunUnitTests(input_api, *args, **kwargs):
  """Run tests serially.

  For better performance, use GetUnitTests and then pass to
  input_api.RunTests.
  """
  tests = GetUnitTests(input_api, *args, **kwargs)
  return input_api.RunTests(tests, False)
def RunPythonUnitTests(input_api, *args, **kwargs):
  """Run python tests in a directory serially.

  DEPRECATED
  """
  tests = GetPythonUnitTests(input_api, *args, **kwargs)
  return input_api.RunTests(tests, False)
def _FetchAllFiles(input_api, white_list, black_list):
  """Hack to fetch all files."""
  # We cannot use AffectedFiles here because we want to test every python
  # file on each single python change. It's because a change in a python file
  # can break another unmodified file.
  # Use code similar to InputApi.FilterSourceFile()
  def matches(filepath, patterns):
    return any(input_api.re.match(p, filepath) for p in patterns)

  files = []
  root = input_api.PresubmitLocalPath()
  path_len = len(root)
  for dirpath, dirnames, filenames in input_api.os_walk(root):
    # Prune blacklisted directories in place so os_walk never descends into
    # them; iterating over a copy makes the removal safe.
    for item in dirnames[:]:
      filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
      if matches(filepath, black_list):
        dirnames.remove(item)
    for item in filenames:
      filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
      if matches(filepath, white_list) and not matches(filepath, black_list):
        files.append(filepath)
  return files
def GetPylint(input_api, output_api, white_list=None, black_list=None,
              disabled_warnings=None, extra_paths_list=None, pylintrc=None):
  """Run pylint on python files.

  The default white_list enforces looking only at *.py files.

  Args:
    input_api: bag of input-related APIs.
    output_api: bag of result factories.
    white_list: regexps selecting files to lint (default: all *.py files).
    black_list: regexps of files to skip (default: input_api's default list).
    disabled_warnings: pylint warning codes to disable via '-d'.
    extra_paths_list: extra directories prepended to PYTHONPATH for pylint.
    pylintrc: pylintrc path relative to the presubmit directory (defaults to
      the 'pylintrc' next to this script).

  Returns:
    A list of input_api.Command objects to execute (empty when no python
    file is affected).
  """
  # Raw string: '\.' in a plain literal is an invalid escape sequence
  # (a DeprecationWarning that newer Pythons turn into an error).
  white_list = tuple(white_list or (r'.*\.py$',))
  black_list = tuple(black_list or input_api.DEFAULT_BLACK_LIST)
  extra_paths_list = extra_paths_list or []

  if input_api.is_committing:
    error_type = output_api.PresubmitError
  else:
    error_type = output_api.PresubmitPromptWarning

  # Only trigger if there is at least one python file affected.
  def rel_path(regex):
    """Modifies a regex for a subject to accept paths relative to root."""
    def samefile(a, b):
      # Default implementation for platforms lacking os.path.samefile
      # (like Windows).
      return input_api.os_path.abspath(a) == input_api.os_path.abspath(b)
    samefile = getattr(input_api.os_path, 'samefile', samefile)
    if samefile(input_api.PresubmitLocalPath(),
                input_api.change.RepositoryRoot()):
      return regex

    prefix = input_api.os_path.join(input_api.os_path.relpath(
        input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()), '')
    return input_api.re.escape(prefix) + regex

  src_filter = lambda x: input_api.FilterSourceFile(
      x, map(rel_path, white_list), map(rel_path, black_list))
  if not input_api.AffectedSourceFiles(src_filter):
    input_api.logging.info('Skipping pylint: no matching changes.')
    return []

  if pylintrc is not None:
    pylintrc = input_api.os_path.join(input_api.PresubmitLocalPath(), pylintrc)
  else:
    pylintrc = input_api.os_path.join(_HERE, 'pylintrc')
  extra_args = ['--rcfile=%s' % pylintrc]
  if disabled_warnings:
    extra_args.extend(['-d', ','.join(disabled_warnings)])

  files = _FetchAllFiles(input_api, white_list, black_list)
  if not files:
    return []
  files.sort()
  input_api.logging.info('Running pylint on %d files', len(files))
  input_api.logging.debug('Running pylint on: %s', files)

  # Copy the system path to the environment so pylint can find the right
  # imports.
  env = input_api.environ.copy()
  import sys
  # NOTE(review): encode() yields bytes, which os.environ rejects on
  # Python 3 — confirm before running this code under Python 3.
  env['PYTHONPATH'] = input_api.os_path.pathsep.join(
      extra_paths_list + sys.path).encode('utf8')

  def GetPylintCmd(files):
    # Windows needs help running python files so we explicitly specify
    # the interpreter to use. It also has limitations on the size of
    # the command-line, so we pass arguments via a pipe.
    if len(files) == 1:
      description = files[0]
    else:
      description = '%s files' % len(files)
    return input_api.Command(
        name='Pylint (%s)' % description,
        cmd=[input_api.python_executable,
             input_api.os_path.join(_HERE, 'third_party', 'pylint.py'),
             '--args-on-stdin'],
        kwargs={'env': env, 'stdin': '\n'.join(files + extra_args)},
        message=error_type)

  # Always run pylint and pass it all the py files at once.
  # Passing py files one at time is slower and can produce
  # different results. input_api.verbose used to be used
  # to enable this behaviour but differing behaviour in
  # verbose mode is not desirable.
  # Leave this unreachable code in here so users can make
  # a quick local edit to diagnose pylint issues more
  # easily.
  if True:
    return [GetPylintCmd(files)]
  else:
    return map(lambda x: GetPylintCmd([x]), files)
def RunPylint(input_api, *args, **kwargs):
  """Legacy presubmit function.

  For better performance, get all tests and then pass to
  input_api.RunTests.
  """
  commands = GetPylint(input_api, *args, **kwargs)
  return input_api.RunTests(commands, False)
# TODO(dpranke): Get the host_url from the input_api instead
def CheckRietveldTryJobExecution(dummy_input_api, dummy_output_api,
                                 dummy_host_url, dummy_platforms,
                                 dummy_owner):
  """No-op placeholder; all arguments are intentionally unused."""
  # Temporarily 'fix' the check while the Rietveld API is being upgraded to
  # something sensible.
  return []
def CheckBuildbotPendingBuilds(input_api, output_api, url, max_pendings,
                               ignored):
  """Warns when buildbot builders have too many pending builds.

  Args:
    input_api: bag of input-related APIs (urllib2, json).
    output_api: bag of result factories.
    url: buildbot JSON status URL.
    max_pendings: maximum acceptable number of pending builds per builder.
    ignored: container of builder names to skip.

  Returns:
    A list with one prompt-warning result, or [] when within limits.  Fetch
    and parse failures only produce notifications (best effort).
  """
  try:
    connection = input_api.urllib2.urlopen(url)
    raw_data = connection.read()
    connection.close()
  except IOError:
    return [output_api.PresubmitNotifyResult('%s is not accessible' % url)]

  try:
    data = input_api.json.loads(raw_data)
  except ValueError:
    return [output_api.PresubmitNotifyResult('Received malformed json while '
                                             'looking up buildbot status')]

  out = []
  # items() instead of iteritems(): dict.iteritems() was removed in Python 3
  # and items() behaves equivalently here on Python 2.
  for (builder_name, builder) in data.items():
    if builder_name in ignored:
      continue
    # Offline builders accumulate pending builds by definition; skip them.
    if builder.get('state', '') == 'offline':
      continue
    pending_builds_len = len(builder.get('pending_builds', []))
    if pending_builds_len > max_pendings:
      out.append('%s has %d build(s) pending' %
                 (builder_name, pending_builds_len))
  if out:
    return [output_api.PresubmitPromptWarning(
        'Build(s) pending. It is suggested to wait that no more than %d '
        'builds are pending.' % max_pendings,
        long_text='\n'.join(out))]
  return []
def CheckOwners(input_api, output_api, source_file_filter=None):
  # Checks OWNERS coverage: at commit time an approving LGTM from an OWNER
  # is required; at upload time missing reviewers only produce suggestions.
  if input_api.is_committing:
    if input_api.tbr:
      # TBR explicitly bypasses the OWNERS requirement.
      return [output_api.PresubmitNotifyResult(
          '--tbr was specified, skipping OWNERS check')]
    if input_api.change.issue:
      if _GetRietveldIssueProps(input_api, None).get('cq_dry_run', False):
        return [output_api.PresubmitNotifyResult(
            'This is a CQ dry run, skipping OWNERS check')]
    else:
      return [output_api.PresubmitError("OWNERS check failed: this change has "
          "no Rietveld issue number, so we can't check it for approvals.")]
    needed = 'LGTM from an OWNER'
    output = output_api.PresubmitError
  else:
    needed = 'OWNER reviewers'
    output = output_api.PresubmitNotifyResult
  affected_files = set([f.LocalPath() for f in
      input_api.change.AffectedFiles(file_filter=source_file_filter)])
  owners_db = input_api.owners_db
  # When committing, only reviewers who actually approved count
  # (approval_needed=True); during upload any listed reviewer counts.
  owner_email, reviewers = _RietveldOwnerAndReviewers(
      input_api,
      owners_db.email_regexp,
      approval_needed=input_api.is_committing)
  # Fall back to the local change author when Rietveld reports no owner.
  owner_email = owner_email or input_api.change.author_email
  if owner_email:
    # The owner implicitly covers the files they own.
    reviewers_plus_owner = set([owner_email]).union(reviewers)
    missing_files = owners_db.files_not_covered_by(affected_files,
        reviewers_plus_owner)
  else:
    missing_files = owners_db.files_not_covered_by(affected_files, reviewers)
  if missing_files:
    output_list = [
        output('Missing %s for these files:\n %s' %
            (needed, '\n '.join(sorted(missing_files))))]
    if not input_api.is_committing:
      suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
      output_list.append(output('Suggested OWNERS: ' +
          '(Use "git-cl owners" to interactively select owners.)\n %s' %
          ('\n '.join(suggested_owners or []))))
    return output_list
  # All files covered, but a committing change still needs a third-party
  # approval (the owner cannot LGTM their own change).
  if input_api.is_committing and not reviewers:
    return [output('Missing LGTM from someone other than %s' % owner_email)]
  return []
def _GetRietveldIssueProps(input_api, messages):
  """Gets the issue properties from rietveld."""
  issue = input_api.change.issue
  if not (issue and input_api.rietveld):
    # No issue or no rietveld handle: nothing to fetch (returns None).
    return None
  return input_api.rietveld.get_issue_properties(
      issue=int(issue), messages=messages)
def _ReviewersFromChange(change):
  """Return the reviewers specified in the |change|, if any."""
  reviewers = set()
  for field in (change.R, change.TBR):
    if field:
      reviewers.update(r.strip() for r in field.split(','))
  # Drop reviewers that aren't specified in email address format.
  return set(r for r in reviewers if '@' in r)
def _RietveldOwnerAndReviewers(input_api, email_regexp, approval_needed=False):
  """Return the owner and reviewers of a change, if any.

  If approval_needed is True, only reviewers who have approved the change
  will be returned.
  """
  issue_props = _GetRietveldIssueProps(input_api, True)
  if not issue_props:
    # No Rietveld issue: fall back to the reviewers declared in the change
    # itself, but only when approval is not required (they cannot have
    # approved anything yet).
    if approval_needed:
      return None, set()
    return None, _ReviewersFromChange(input_api.change)

  if not approval_needed:
    return issue_props['owner_email'], set(issue_props['reviewers'])

  owner_email = issue_props['owner_email']

  def match_reviewer(r):
    # The owner cannot approve their own change.
    return email_regexp.match(r) and r != owner_email

  approvers = set(
      m['sender'] for m in issue_props.get('messages', [])
      if m.get('approval') and match_reviewer(m['sender']))
  return owner_email, approvers
def _CheckConstNSObject(input_api, output_api, source_file_filter):
  """Checks to make sure no objective-c files have |const NSSomeClass*|."""
  # NSPoint/NSRange/NSRect/NSSize are structs, so const pointers to them
  # are fine; everything else NS* is an Objective-C object.
  pattern = input_api.re.compile(
      r'(?<!reinterpret_cast<)'
      r'const\s+NS(?!(Point|Range|Rect|Size)\s*\*)\w*\s*\*')

  def objective_c_filter(f):
    ext = input_api.os_path.splitext(f.LocalPath())[1]
    return source_file_filter(f) and ext in ('.h', '.m', '.mm')

  files = [f for f in input_api.AffectedSourceFiles(objective_c_filter)
           if pattern.search(input_api.ReadFile(f))]
  if not files:
    return []
  if input_api.is_committing:
    res_type = output_api.PresubmitPromptWarning
  else:
    res_type = output_api.PresubmitNotifyResult
  return [ res_type('|const NSClass*| is wrong, see ' +
                    'http://dev.chromium.org/developers/clang-mac',
                    files) ]
def CheckSingletonInHeaders(input_api, output_api, source_file_filter=None):
  """Deprecated, must be removed."""
  # Unconditionally notify callers that this check no longer does anything.
  message = 'CheckSingletonInHeaders is deprecated, please remove it.'
  return [output_api.PresubmitNotifyResult(message)]
def PanProjectChecks(input_api, output_api,
                     excluded_paths=None, text_files=None,
                     license_header=None, project_name=None,
                     owners_check=True, maxlen=80):
  """Checks that ALL chromium orbit projects should use.

  These are checks to be run on all Chromium orbit project, including:
    Chromium
    Native Client
    V8
  When you update this function, please take this broad scope into account.

  Args:
    input_api: Bag of input related interfaces.
    output_api: Bag of output related interfaces.
    excluded_paths: Don't include these paths in common checks.
    text_files: Which file are to be treated as documentation text files.
    license_header: What license header should be on files.
    project_name: What is the name of the project as it appears in the license.
    owners_check: Whether to run the OWNERS approval check.
    maxlen: Maximum allowed source line length.
  Returns:
    A list of warning or error objects.
  """
  excluded_paths = tuple(excluded_paths or [])
  text_files = tuple(text_files or (
      r'.+\.txt$',
      r'.+\.json$',
  ))
  project_name = project_name or 'Chromium'

  # Accept any year number from 2006 to the current year, or the special
  # 2006-20xx string used on the oldest files. 2006-20xx is deprecated, but
  # tolerated on old files.
  current_year = int(input_api.time.strftime('%Y'))
  allowed_years = (str(s) for s in reversed(xrange(2006, current_year + 1)))
  years_re = '(' + '|'.join(allowed_years) + '|2006-2008|2006-2009|2006-2010)'

  # The (c) is deprecated, but tolerate it until it's removed from all files.
  license_header = license_header or (
      r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors\. '
      r'All rights reserved\.\n'
      r'.*? Use of this source code is governed by a BSD-style license that '
      r'can be\n'
      r'.*? found in the LICENSE file\.(?: \*/)?\n'
  ) % {
      'year': years_re,
      'project': project_name,
  }

  results = []
  # This code loads the default black list (e.g. third_party, experimental,
  # etc) and add our black list (breakpad, skia and v8 are still not following
  # google style and are not really living this repository).
  # See presubmit_support.py InputApi.FilterSourceFile for the (simple) usage.
  black_list = input_api.DEFAULT_BLACK_LIST + excluded_paths
  white_list = input_api.DEFAULT_WHITE_LIST + text_files
  sources = lambda x: input_api.FilterSourceFile(x, black_list=black_list)
  # NOTE: rebinds the text_files tuple above into a filter callable.
  text_files = lambda x: input_api.FilterSourceFile(
      x, black_list=black_list, white_list=white_list)

  snapshot_memory = []
  def snapshot(msg):
    """Measures & prints performance warning if a rule is running slow."""
    dt2 = input_api.time.clock()
    if snapshot_memory:
      delta_ms = int(1000*(dt2 - snapshot_memory[0]))
      if delta_ms > 500:
        print " %s took a long time: %dms" % (snapshot_memory[1], delta_ms)
    snapshot_memory[:] = (dt2, msg)

  if owners_check:
    snapshot("checking owners")
    results.extend(input_api.canned_checks.CheckOwners(
        input_api, output_api, source_file_filter=None))

  snapshot("checking long lines")
  results.extend(input_api.canned_checks.CheckLongLines(
      input_api, output_api, maxlen, source_file_filter=sources))
  snapshot( "checking tabs")
  results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
      input_api, output_api, source_file_filter=sources))
  snapshot( "checking stray whitespace")
  results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
      input_api, output_api, source_file_filter=sources))
  snapshot("checking nsobjects")
  results.extend(_CheckConstNSObject(
      input_api, output_api, source_file_filter=sources))

  # The following checks are only done on commit, since the commit bot will
  # auto-fix most of these.
  if input_api.is_committing:
    snapshot("checking eol style")
    results.extend(input_api.canned_checks.CheckChangeSvnEolStyle(
        input_api, output_api, source_file_filter=text_files))
    snapshot("checking svn mime types")
    results.extend(input_api.canned_checks.CheckSvnForCommonMimeTypes(
        input_api, output_api))
    snapshot("checking license")
    results.extend(input_api.canned_checks.CheckLicense(
        input_api, output_api, license_header, source_file_filter=sources))
    snapshot("checking was uploaded")
    results.extend(input_api.canned_checks.CheckChangeWasUploaded(
        input_api, output_api))
    snapshot("checking description")
    results.extend(input_api.canned_checks.CheckChangeHasDescription(
        input_api, output_api))
    results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription(
        input_api, output_api))
    snapshot("checking do not submit in files")
    results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles(
        input_api, output_api))
  snapshot("done")
  return results
def CheckPatchFormatted(input_api, output_api):
  """Warns when `git cl format --dry-run` reports the directory needs it."""
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code != 2:
    # As this is just a warning, ignore all other errors if the user
    # happens to have a broken clang-format, doesn't use git, etc etc.
    return []
  short_path = input_api.basename(input_api.PresubmitLocalPath())
  full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
                                        input_api.change.RepositoryRoot())
  return [output_api.PresubmitPromptWarning(
      'The %s directory requires source formatting. '
      'Please run git cl format %s' %
      (short_path, full_path))]
def CheckGNFormatted(input_api, output_api):
  """Warns for each affected .gn/.gni file that `gn format` would change."""
  import gn

  def _is_gn_file(f):
    path = f.LocalPath()
    return path.endswith('.gn') or path.endswith('.gni')

  warnings = []
  for affected in input_api.AffectedFiles(include_deletes=False,
                                          file_filter=_is_gn_file):
    rc = gn.main(['gn', 'format', '--dry-run', affected.AbsoluteLocalPath()])
    if rc == 2:
      warnings.append(output_api.PresubmitPromptWarning(
          '%s requires formatting. Please run `gn format --in-place %s`.' % (
              affected.AbsoluteLocalPath(), affected.LocalPath())))
  # It's just a warning, so ignore other types of failures assuming they'll be
  # caught elsewhere.
  return warnings
| {
"content_hash": "f11cd859ad8c67c56318e5147680af24",
"timestamp": "",
"source": "github",
"line_count": 1134,
"max_line_length": 80,
"avg_line_length": 36.7962962962963,
"alnum_prop": 0.6654923670525079,
"repo_name": "liaorubei/depot_tools",
"id": "9179fb6ee35436d654ecae91ed4e02692514d0b3",
"size": "41894",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "presubmit_canned_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5028"
},
{
"name": "PHP",
"bytes": "569"
},
{
"name": "Python",
"bytes": "1763015"
},
{
"name": "Shell",
"bytes": "82675"
}
],
"symlink_target": ""
} |
import collections
import contextlib
import functools
import gc
import inspect
import itertools
import re
import sys
import uuid
import weakref
import eventlet.greenpool
import eventlet.greenthread
import semantic_version
import six
from yaql.language import contexts
import yaql.language.exceptions
import yaql.language.expressions
from yaql.language import utils as yaqlutils
from murano.dsl import constants
from murano.dsl import dsl_types
from murano.dsl import exceptions
# Monotonic counter used by get_current_thread_id() to label green threads.
_threads_sequencer = 0

# type string: ns.something.MyApp[/1.2.3-alpha][@my.package.fqn]
TYPE_RE = re.compile(r'([a-zA-Z0-9_.]+)(?:/([^@]+))?(?:@([a-zA-Z0-9_.]+))?$')
def evaluate(value, context, freeze=True):
    """Recursively evaluate *value* against the given YAQL *context*.

    YAQL expressions/statements are executed; containers are rebuilt with
    every element evaluated. When freeze is True the rebuilt containers use
    immutable types (tuple / FrozenDict / frozenset) instead of mutable ones.
    """
    list_type = tuple if freeze else list
    dict_type = yaqlutils.FrozenDict if freeze else dict
    set_type = frozenset if freeze else set
    if isinstance(value, (dsl_types.YaqlExpression,
                          yaql.language.expressions.Statement)):
        return value(context)
    elif isinstance(value, yaqlutils.MappingType):
        return dict_type(
            (evaluate(d_key, context, freeze),
             evaluate(d_value, context, freeze))
            for d_key, d_value in six.iteritems(value))
    elif yaqlutils.is_sequence(value):
        return list_type(evaluate(t, context, freeze) for t in value)
    elif isinstance(value, yaqlutils.SetType):
        return set_type(evaluate(t, context, freeze) for t in value)
    elif yaqlutils.is_iterable(value):
        # Arbitrary iterables are capped so endless generators cannot hang us.
        return list_type(
            evaluate(t, context, freeze)
            for t in yaqlutils.limit_iterable(
                value, constants.ITERATORS_LIMIT))
    elif isinstance(value, dsl_types.MuranoObjectInterface):
        # Unwrap interface objects to the underlying murano object.
        return value.object
    else:
        return value
def merge_lists(list1, list2):
    """Return list1 + list2 with duplicates dropped, order preserved.

    Membership is tested with ==, so elements need not be hashable
    (nested dicts/lists are fine).
    """
    merged = []
    for element in itertools.chain(list1, list2):
        if element not in merged:
            merged.append(element)
    return merged
def merge_dicts(dict1, dict2, max_levels=0):
    """Recursively merge *dict2* into *dict1* and return a new dict.

    Values from dict2 win on conflicting keys. Dict values are merged
    recursively (down to max_levels levels; 0 means unlimited) and list
    values are merged with merge_lists(). Conflicting values of different
    types raise TypeError, except that strings and None may replace each
    other (in which case the dict1 value is kept).

    :raises TypeError: when a key maps to values of incompatible types.
    """
    result = {}
    for key, value1 in dict1.items():
        result[key] = value1
        if key in dict2:
            value2 = dict2[key]
            if type(value2) != type(value1):
                if ((isinstance(value1,
                                six.string_types) or value1 is None) and
                        (isinstance(value2,
                                    six.string_types) or value2 is None)):
                    continue
                # Fix: report which key/types clashed instead of raising a
                # bare, message-less TypeError.
                raise TypeError(
                    'Cannot merge key "{0}": incompatible types '
                    '{1} and {2}'.format(key, type(value1), type(value2)))
            if max_levels != 1 and isinstance(value2, dict):
                result[key] = merge_dicts(
                    value1, value2,
                    0 if max_levels == 0 else max_levels - 1)
            elif max_levels != 1 and isinstance(value2, list):
                result[key] = merge_lists(value1, value2)
            else:
                result[key] = value2
    for key, value1 in dict2.items():
        if key not in result:
            result[key] = value1
    return result
def generate_id():
    """Return a fresh 32-character lowercase hex identifier."""
    return '%032x' % uuid.uuid4().int
def parallel_select(collection, func, limit=1000):
    """Apply *func* to every element of *collection* on a greenthread pool.

    The current DSL context and object store are captured here and
    re-installed inside each worker greenthread. If any call raised, the
    first failure (in result order) is re-raised with its original
    traceback; otherwise the mapped results are returned.
    """
    # workaround for eventlet issue 232
    # https://github.com/eventlet/eventlet/issues/232
    context = get_context()
    object_store = get_object_store()

    def wrapper(element):
        # Return (result, failed_flag, traceback) so failures survive imap
        # instead of killing the pool.
        try:
            with with_object_store(object_store), contextual(context):
                return func(element), False, None
        except Exception as e:
            return e, True, sys.exc_info()[2]

    gpool = eventlet.greenpool.GreenPool(limit)
    result = list(gpool.imap(wrapper, collection))
    try:
        exception = next(t for t in result if t[1])
    except StopIteration:
        # NOTE(review): on Python 3 map() returns a lazy iterator, not a
        # list -- presumably callers materialize it; confirm.
        return map(lambda t: t[0], result)
    else:
        # exception[0] is the caught exception instance; re-raise it with
        # the traceback captured in the worker.
        six.reraise(exception[0], None, exception[2])
def enum(**enums):
    """Create an ad-hoc enumeration class from the given name/value pairs."""
    return type('Enum', (object,), dict(enums))
def get_context():
    """Return the DSL context bound to the current green thread, or None."""
    return getattr(eventlet.greenthread.getcurrent(),
                   constants.TL_CONTEXT, None)
def get_executor():
    """Return the executor owning the current object store, if any."""
    store = get_object_store()
    if store is None:
        return None
    return store.executor
def get_type(context=None):
    """Return the current murano type from *context* (default: thread ctx)."""
    return (context or get_context())[constants.CTX_TYPE]
def get_execution_session():
    """Return the execution session of the current executor, if any."""
    executor = get_executor()
    if executor is None:
        return None
    return executor.execution_session
def get_object_store():
    """Return the object store bound to the current green thread, or None."""
    return getattr(eventlet.greenthread.getcurrent(),
                   constants.TL_OBJECT_STORE, None)
def get_package_loader():
    """Return the package loader of the current executor, if any."""
    executor = get_executor()
    if executor is None:
        return None
    return executor.package_loader
def get_this(context=None):
    """Return `this` (the current object) from the given/current context."""
    return (context or get_context())[constants.CTX_THIS]
def get_caller_context(context=None):
    """Return the caller's context recorded in the given/current context."""
    return (context or get_context())[constants.CTX_CALLER_CONTEXT]
def get_attribute_store():
    """Return the attribute store of the current executor, if any."""
    executor = get_executor()
    if executor is None:
        return None
    return executor.attribute_store
def get_current_instruction(context=None):
    """Return the instruction currently being executed."""
    return (context or get_context())[constants.CTX_CURRENT_INSTRUCTION]
def get_current_method(context=None):
    """Return the method currently being executed."""
    return (context or get_context())[constants.CTX_CURRENT_METHOD]
def get_yaql_engine(context=None):
    """Return the YAQL engine from the context, or None with no context."""
    context = context or get_context()
    if context is None:
        return None
    return context[constants.CTX_YAQL_ENGINE]
def get_current_exception(context=None):
    """Return the exception currently being handled, if any."""
    return (context or get_context())[constants.CTX_CURRENT_EXCEPTION]
def are_property_modifications_allowed(context=None):
    """Tell whether property writes are currently permitted."""
    allowed = (context or get_context())[constants.CTX_ALLOW_PROPERTY_WRITES]
    return allowed or False
def get_names_scope(context=None):
    """Return the name-resolution scope (murano type) from the context."""
    return (context or get_context())[constants.CTX_NAMES_SCOPE]
def get_class(name, context=None):
    """Resolve class *name* within the current names scope and return it."""
    scope = get_names_scope(context or get_context())
    fqn = scope.namespace_resolver.resolve_name(name)
    return scope.package.find_class(fqn)
def get_contract_passkey():
    """Return the contract passkey of the current green thread, or None."""
    return getattr(eventlet.greenthread.getcurrent(),
                   constants.TL_CONTRACT_PASSKEY, None)
def is_objects_dry_run_mode():
    """Tell whether the current green thread runs objects in dry-run mode."""
    return bool(getattr(eventlet.greenthread.getcurrent(),
                        constants.TL_OBJECTS_DRY_RUN, False))
def get_current_thread_id():
    """Return a stable 'T<n>' label for the current green thread.

    The label is stored on the thread object on first use; a module-level
    counter hands out the next number.
    """
    global _threads_sequencer
    current = eventlet.greenthread.getcurrent()
    label = getattr(current, constants.TL_ID, None)
    if label is None:
        label = 'T' + str(_threads_sequencer)
        _threads_sequencer += 1
        setattr(current, constants.TL_ID, label)
    return label
@contextlib.contextmanager
def thread_local_attribute(name, value):
    """Temporarily set attribute *name* on the current green thread.

    Passing value=None removes the attribute for the duration of the block.
    On exit the previous value is restored, or the attribute is removed if
    it did not exist before.

    NOTE(review): a pre-existing value of None is indistinguishable here
    from "attribute absent" and would not be restored -- confirm callers
    never store None explicitly.
    """
    current_thread = eventlet.greenthread.getcurrent()
    old_value = getattr(current_thread, name, None)
    if value is not None:
        setattr(current_thread, name, value)
    elif hasattr(current_thread, name):
        delattr(current_thread, name)
    try:
        yield
    finally:
        # Restore the previous state even if the body raised.
        if old_value is not None:
            setattr(current_thread, name, old_value)
        elif hasattr(current_thread, name):
            delattr(current_thread, name)
def contextual(ctx):
    """Context manager installing *ctx* as the thread-local DSL context."""
    return thread_local_attribute(constants.TL_CONTEXT, ctx)
def with_object_store(object_store):
    """Context manager installing *object_store* as the thread-local store."""
    return thread_local_attribute(constants.TL_OBJECT_STORE, object_store)
def parse_version_spec(version_spec):
    """Coerce *version_spec* into a normalized semantic_version.Spec.

    Accepts an existing Spec, a Version (treated as ==version) or a string.
    Whitespace is stripped from strings and a leading digit implies '=='.
    Empty/None means any version (spec '0', i.e. >=0).
    """
    if isinstance(version_spec, semantic_version.Spec):
        return normalize_version_spec(version_spec)
    if isinstance(version_spec, semantic_version.Version):
        return normalize_version_spec(
            semantic_version.Spec('==' + str(version_spec)))
    if not version_spec:
        version_spec = '0'
    # Fix: use a raw string for the regex -- '\s' in a plain literal is an
    # invalid escape sequence (DeprecationWarning on Python 3.6+).
    version_spec = re.sub(r'\s+', '', str(version_spec))
    if version_spec[0].isdigit():
        version_spec = '==' + str(version_spec)
    version_spec = semantic_version.Spec(version_spec)
    return normalize_version_spec(version_spec)
def parse_version(version):
    """Coerce *version* into a semantic_version.Version ('0' when empty)."""
    if isinstance(version, semantic_version.Version):
        return version
    return semantic_version.Version.coerce(str(version or '0'))
def traverse(seed, producer=None, track_visited=True):
    """Breadth-first generator over a graph starting from *seed*.

    The consumer may answer each yielded item via generator.send() with an
    iterable of children; when nothing is sent, *producer* (if given) is
    called to produce them. With track_visited=True each item is yielded at
    most once.
    """
    if not yaqlutils.is_iterable(seed):
        seed = [seed]
    visited = None if not track_visited else set()
    queue = collections.deque(seed)
    while queue:
        item = queue.popleft()
        if track_visited:
            if item in visited:
                continue
            visited.add(item)
        # Children may come back through send(); fall back to producer(item).
        produced = (yield item)
        if produced is None and producer:
            produced = producer(item)
        if produced:
            queue.extend(produced)
def cast(obj, murano_class, pov_or_version_spec=None):
    """Cast *obj* to the class named *murano_class* in its type hierarchy.

    pov_or_version_spec narrows the candidate classes: it may be a Version,
    a version Spec (or its string form), or a MuranoPackage whose
    requirements define the allowed version range ("point of view").

    :raises exceptions.NoClassFound: no matching class in obj's hierarchy.
    :raises exceptions.AmbiguousClassName: more than one class matched.
    """
    if isinstance(obj, dsl_types.MuranoObjectInterface):
        obj = obj.object
    # Normalize pov_or_version_spec: a type stands for its package, a string
    # for a parsed version spec.
    if isinstance(pov_or_version_spec, dsl_types.MuranoType):
        pov_or_version_spec = pov_or_version_spec.package
    elif isinstance(pov_or_version_spec, six.string_types):
        pov_or_version_spec = parse_version_spec(pov_or_version_spec)
    # Normalize murano_class down to a plain class name.
    if isinstance(murano_class, dsl_types.MuranoTypeReference):
        murano_class = murano_class.type
    if isinstance(murano_class, dsl_types.MuranoType):
        if pov_or_version_spec is None:
            pov_or_version_spec = parse_version_spec(murano_class.version)
        murano_class = murano_class.name
    candidates = []
    # Scan obj's own type and all its ancestors for name+version matches.
    for cls in itertools.chain((obj.type,), obj.type.ancestors()):
        if cls.name != murano_class:
            continue
        elif isinstance(pov_or_version_spec, semantic_version.Version):
            if cls.version != pov_or_version_spec:
                continue
        elif isinstance(pov_or_version_spec, semantic_version.Spec):
            if cls.version not in pov_or_version_spec:
                continue
        elif isinstance(pov_or_version_spec, dsl_types.MuranoPackage):
            requirement = pov_or_version_spec.requirements.get(
                cls.package.name)
            if requirement is None:
                raise exceptions.NoClassFound(murano_class)
            if cls.version not in requirement:
                continue
        elif pov_or_version_spec is not None:
            raise ValueError('pov_or_version_spec of unsupported '
                             'type {0}'.format(type(pov_or_version_spec)))
        candidates.append(cls)
    if not candidates:
        raise exceptions.NoClassFound(murano_class)
    elif len(candidates) > 1:
        raise exceptions.AmbiguousClassName(murano_class)
    return obj.cast(candidates[0])
def is_instance_of(obj, class_name, pov_or_version_spec=None):
    """Tell whether *obj* is a murano object castable to *class_name*."""
    if not isinstance(obj, (dsl_types.MuranoObject,
                            dsl_types.MuranoObjectInterface)):
        return False
    try:
        cast(obj, class_name, pov_or_version_spec)
    except (exceptions.NoClassFound, exceptions.AmbiguousClassName):
        return False
    return True
def memoize(func):
    """Wrap *func* with a fresh, unbounded positional-args result cache."""
    return get_memoize_func(func, {})
def get_memoize_func(func, cache):
    """Wrap *func* so results are memoized in *cache*, keyed by args tuple."""
    @functools.wraps(func)
    def wrap(*args):
        try:
            return cache[args]
        except KeyError:
            cache[args] = func(*args)
            return cache[args]
    return wrap
def normalize_version_spec(version_spec):
    """Rewrite *version_spec* so partial versions compare as proper ranges.

    Spec terms whose versions omit minor/patch (e.g. '>=1', '==1.2') are
    expanded into equivalent fully-qualified '>='/'<' pairs; the '-0'
    extension makes pre-release versions fall inside the ranges. '*' terms
    are dropped; an empty result collapses back to Spec('*').
    """
    def coerce(v):
        # Fill missing minor/patch components with zeros.
        return semantic_version.Version('{0}.{1}.{2}'.format(
            v.major, v.minor or 0, v.patch or 0
        ))

    def increment(v):
        # NOTE(ativelkov): replace these implementations with next_minor() and
        # next_major() calls when the semantic_version is updated in global
        # requirements.
        if v.minor is None:
            return semantic_version.Version(
                '.'.join(str(x) for x in [v.major + 1, 0, 0]))
        else:
            return semantic_version.Version(
                '.'.join(str(x) for x in [v.major, v.minor + 1, 0]))

    def extend(v):
        # Append '-0' so pre-release versions are included in the bound.
        return semantic_version.Version(str(v) + '-0')

    # For each operator: list of (new operator, transformation pipeline).
    transformations = {
        '>': [('>=', (increment, extend))],
        '>=': [('>=', (coerce,))],
        '<': [('<', (coerce, extend))],
        '<=': [('<', (increment, extend))],
        '!=': [('>=', (increment, extend))],
        '==': [('>=', (coerce,)), ('<', (increment, coerce, extend))]
    }
    new_parts = []
    for item in version_spec.specs:
        if item.kind == '*':
            continue
        elif item.spec.patch is not None:
            # Fully-qualified versions pass through unchanged.
            new_parts.append(str(item))
        else:
            for op, funcs in transformations[item.kind]:
                new_parts.append('{0}{1}'.format(
                    op,
                    six.moves.reduce(lambda v, f: f(v), funcs, item.spec)
                ))
    if not new_parts:
        return semantic_version.Spec('*')
    return semantic_version.Spec(*new_parts)
# Mapping of semantic_version comparison operators to the API's textual
# operator names; used by breakdown_spec_to_query().
semver_to_api_map = {
    '>': 'gt',
    '>=': 'ge',
    '<': 'lt',
    '<=': 'le',
    '!=': 'ne',
    '==': 'eq'
}
def breakdown_spec_to_query(normalized_spec):
    """Convert a normalized version Spec into 'op:version' API query terms.

    '*' terms are skipped.
    """
    return ['%s:%s' % (semver_to_api_map[part.kind], part.spec)
            for part in normalized_spec.specs
            if part.kind != '*']
def link_contexts(parent_context, context):
    """Chain *context* onto *parent_context*; no-op when context is falsy."""
    if context:
        return contexts.LinkedContext(parent_context, context)
    return parent_context
def inspect_is_static(cls, name):
    """Tell whether *name* is declared on *cls* itself as a staticmethod."""
    member = cls.__dict__.get(name)
    return member is not None and isinstance(member, staticmethod)
def inspect_is_classmethod(cls, name):
    """Tell whether *name* is declared on *cls* itself as a classmethod."""
    member = cls.__dict__.get(name)
    return member is not None and isinstance(member, classmethod)
def inspect_is_method(cls, name):
    """Tell whether *name* is a plain instance method of *cls*.

    Static methods and classmethods are explicitly excluded.
    """
    member = getattr(cls, name, None)
    if member is None:
        return False
    looks_callable = inspect.isfunction(member) or inspect.ismethod(member)
    return (looks_callable and
            not inspect_is_static(cls, name) and
            not inspect_is_classmethod(cls, name))
def inspect_is_property(cls, name):
    """Tell whether *name* resolves to a data descriptor (e.g. property)."""
    member = getattr(cls, name, None)
    return member is not None and inspect.isdatadescriptor(member)
def updated_dict(d, val):
    """Return a copy of *d* (or {} if None) updated with *val* (if not None).

    Neither argument is mutated.
    """
    result = {} if d is None else d.copy()
    if val is not None:
        result.update(val)
    return result
def resolve_type(value, scope_type, return_reference=False):
    """Resolve *value* (name, type, or type reference) against *scope_type*.

    Plain names are resolved through scope_type's namespace resolver and
    looked up in its package. With return_reference=True a type reference
    is returned instead of the type itself. None passes through unchanged.
    """
    if value is None:
        return None
    if isinstance(scope_type, dsl_types.MuranoTypeReference):
        scope_type = scope_type.type
    if not isinstance(value, (dsl_types.MuranoType,
                              dsl_types.MuranoTypeReference)):
        # Plain string: resolve through the scope's namespace and package.
        name = scope_type.namespace_resolver.resolve_name(value)
        result = scope_type.package.find_class(name)
    else:
        result = value
    # Convert between type and reference as requested.
    if isinstance(result, dsl_types.MuranoTypeReference):
        if return_reference:
            return result
        return result.type
    elif return_reference:
        return result.get_reference()
    return result
def parse_object_definition(spec, scope_type, context):
    """Parse an object-model mapping into its components.

    Returns a dict with 'type', 'properties', 'id', 'name', 'destroyed',
    'dependencies' and 'extra' keys, or None when *spec* is not a mapping.
    Two layouts are supported: the inline format
    ({TypeName: {props}, ...system data}) and the header format
    ({...props, '?': {system data}}).
    """
    if not isinstance(spec, yaqlutils.MappingType):
        return None
    if context:
        spec = evaluate(spec, context, freeze=False)
    else:
        # Copy so the pops below do not mutate the caller's mapping.
        spec = spec.copy()
    system_data = None
    type_obj = None
    props = {}
    ns_resolver = scope_type.namespace_resolver if scope_type else None
    for key in spec:
        # Inline format: the first type-name key maps to the property dict;
        # everything else left in spec is system data.
        if (ns_resolver and ns_resolver.is_typename(key, False) or
                isinstance(key, (dsl_types.MuranoTypeReference,
                                 dsl_types.MuranoType))):
            type_obj = resolve_type(key, scope_type)
            props = spec.pop(key) or {}
            system_data = spec
            break
    if system_data is None:
        props = spec
        if '?' in spec:
            system_data = spec.pop('?')
            obj_type = system_data.get('type')
            if isinstance(obj_type, dsl_types.MuranoTypeReference):
                type_obj = obj_type.type
            elif isinstance(obj_type, dsl_types.MuranoType):
                type_obj = obj_type
            elif obj_type:
                # Type given as a string: parse it and load the class from
                # its (possibly explicit) package.
                type_str, version_str, package_str = parse_type_string(
                    obj_type,
                    system_data.get('classVersion'),
                    system_data.get('package')
                )
                version_spec = parse_version_spec(version_str)
                package_loader = get_package_loader()
                if package_str:
                    package = package_loader.load_package(
                        package_str, version_spec)
                else:
                    package = package_loader.load_class_package(
                        type_str, version_spec)
                type_obj = package.find_class(type_str, False)
        else:
            system_data = {}
    return {
        'type': type_obj,
        'properties': yaqlutils.filter_parameters_dict(props),
        'id': system_data.get('id'),
        'name': system_data.get('name'),
        'destroyed': system_data.get('destroyed', False),
        'dependencies': system_data.get('dependencies', {}),
        'extra': {
            key: value for key, value in six.iteritems(system_data)
            if key.startswith('_')
        }
    }
def assemble_object_definition(parsed, model_format=dsl_types.DumpTypes.Mixed):
    """Inverse of parse_object_definition(): rebuild an object-model dict.

    model_format selects the layout: Inline ({type: props, ...system}),
    Mixed ('?' header holding the type object) or Serializable ('?' header
    with the type rendered as a string).

    :raises ValueError: for an unknown model_format.
    """
    if model_format == dsl_types.DumpTypes.Inline:
        result = {
            parsed['type']: parsed['properties'],
            'id': parsed['id'],
            'name': parsed['name'],
            'dependencies': parsed['dependencies'],
            'destroyed': parsed['destroyed']
        }
        result.update(parsed['extra'])
        return result
    # Header-based formats: properties at top level, system data under '?'.
    result = parsed['properties']
    header = {
        'id': parsed['id'],
        'name': parsed['name']
    }
    if parsed['destroyed']:
        header['destroyed'] = True
    header.update(parsed['extra'])
    result['?'] = header
    if model_format == dsl_types.DumpTypes.Mixed:
        header['type'] = parsed['type']
        return result
    elif model_format == dsl_types.DumpTypes.Serializable:
        cls = parsed['type']
        if cls:
            header['type'] = format_type_string(cls)
        return result
    else:
        raise ValueError('Invalid Serialization Type')
def function(c):
    """Unwrap a bound/unbound method to its underlying function.

    Only Python 2 methods carry im_func; anything else (including all
    Python 3 callables) is returned unchanged.
    """
    return getattr(c, 'im_func', c)
def list_value(v):
    """Normalize *v* to a list: None -> [], scalar -> [v], sequence as-is."""
    if v is None:
        return []
    return v if yaqlutils.is_sequence(v) else [v]
def weak_proxy(obj):
    """Return a weakref.proxy to *obj*, dereferencing weak references first.

    None and existing proxies are returned unchanged.
    """
    if obj is None or isinstance(obj, weakref.ProxyType):
        return obj
    target = obj() if isinstance(obj, weakref.ReferenceType) else obj
    return weakref.proxy(target)
def weak_ref(obj):
    """Return a weak reference to *obj*.

    Murano objects get a special reference that, when the target has been
    collected, tries to re-fetch it from the object store by object_id.
    None and existing weak references are returned unchanged; anything else
    gets a plain weakref.ref.
    """
    class MuranoObjectWeakRef(weakref.ReferenceType):
        def __init__(self, murano_object):
            # Keep both the weakref and the object id so the target can be
            # re-resolved from the object store after collection.
            self.ref = weakref.ref(murano_object)
            self.object_id = murano_object.object_id

        def __call__(self):
            res = self.ref()
            if not res:
                object_store = get_object_store()
                if object_store:
                    res = object_store.get(self.object_id)
                    if res:
                        # Cache a fresh weakref to the re-fetched object.
                        self.ref = weakref.ref(res)
            return res

    if obj is None or isinstance(obj, weakref.ReferenceType):
        return obj
    if isinstance(obj, dsl_types.MuranoObject):
        return MuranoObjectWeakRef(obj)
    return weakref.ref(obj)
def parse_type_string(type_str, default_version, default_package):
    """Split 'name[/version][@package]' into (name, version, package).

    Missing parts fall back to the supplied defaults. Returns None when the
    string does not match TYPE_RE.
    """
    match = TYPE_RE.match(type_str)
    if match is None:
        return None
    name, version, package = match.groups()
    return (
        name,
        version if version is not None else default_version,
        package if package is not None else default_package
    )
def format_type_string(type_obj):
    """Render a murano type (or type reference) as 'name/version@package'.

    :raises ValueError: when the argument is not a murano type.
    """
    if isinstance(type_obj, dsl_types.MuranoTypeReference):
        type_obj = type_obj.type
    if not isinstance(type_obj, dsl_types.MuranoType):
        raise ValueError('Invalid argument')
    return '{0}/{1}@{2}'.format(
        type_obj.name, type_obj.version, type_obj.package.name)
def patch_dict(dct, path, value):
    """Set (or delete, when value is NO_VALUE) dotted *path* inside *dct*.

    Traversal stops silently if an intermediate node is not a dict.
    """
    parts = path.split('.')
    node = dct
    for key in parts[:-1]:
        if not isinstance(node, dict):
            node = None
            break
        node = node.get(key)
    if isinstance(node, dict):
        if value is yaqlutils.NO_VALUE:
            node.pop(parts[-1])
        else:
            node[parts[-1]] = value
def format_scalar(value):
    """Render *value* for display, wrapping strings in single quotes."""
    is_text = isinstance(value, six.string_types)
    return "'{0}'".format(value) if is_text else six.text_type(value)
def is_passkey(value):
    """Tell whether *value* is the contract passkey of the current thread."""
    passkey = get_contract_passkey()
    return passkey is not None and value is passkey
def find_object_owner(obj, predicate):
    """Walk obj's owner chain (excluding obj itself) and return the first
    owner satisfying *predicate*, or None when the chain is exhausted."""
    owner = obj.owner
    while owner:
        if predicate(owner):
            return owner
        owner = owner.owner
    return None
# This function is not intended to be used in the code but is very useful
# for debugging object reference leaks
def walk_gc(obj, towards, handler):
    """Breadth-first search of the GC reference graph starting at *obj*.

    With towards=True walks referrers (who keeps obj alive); otherwise
    walks referents (what obj keeps alive). Yields the reference trace for
    every visited object accepted by *handler*.
    """
    visited = set()
    queue = collections.deque([(obj, [])])
    while queue:
        item, trace = queue.popleft()
        # Track by identity: arbitrary objects may be unhashable or compare
        # equal without being the same node.
        if id(item) in visited:
            continue
        if handler(item):
            if towards:
                yield trace + [item]
            else:
                yield [item] + trace
        visited.add(id(item))
        if towards:
            queue.extend(
                [(t, trace + [item]) for t in gc.get_referrers(item)]
            )
        else:
            queue.extend(
                [(t, [item] + trace) for t in gc.get_referents(item)]
            )
| {
"content_hash": "f5dc02805ac3ee397f03b6ba4c080300",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 79,
"avg_line_length": 30.21775312066574,
"alnum_prop": 0.6032496442832882,
"repo_name": "DavidPurcell/murano_temp",
"id": "57757a8a885f98d4e0a9a3adae89020e9c3608f6",
"size": "22401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/dsl/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "304"
},
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Puppet",
"bytes": "86"
},
{
"name": "Python",
"bytes": "1758483"
},
{
"name": "Ruby",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "34202"
}
],
"symlink_target": ""
} |
{
' (leave empty to detach account)': ' (leave empty to detach account)',
' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': ' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
' by ': ' by ',
' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.': ' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.',
' on ': ' on ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# of Houses Damaged': '# of Houses Damaged',
'# of Houses Destroyed': 'Số căn nhà bị phá hủy',
'# of International Staff': '# of International Staff',
'# of National Staff': '# of National Staff',
'# of People Affected': 'Số người bị ảnh hưởng',
'# of People Deceased': '# of People Deceased',
'# of People Injured': '# of People Injured',
'# of Vehicles': '# of Vehicles',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'(Constraints Only)': '(Constraints Only)',
') & then click on the map below to adjust the Lat/Lon fields:': ') & then click on the map below to adjust the Lat/Lon fields:',
'* Required Fields': '* Required Fields',
'0-15 minutes': '0-15 minutes',
'1 Assessment': '1 Assessment',
'1 location, shorter time, can contain multiple Tasks': '1 location, shorter time, can contain multiple Tasks',
'1-3 days': '1-3 days',
'1. Fill the necessary fields in BLOCK letters.': '1. Fill the necessary fields in BLOCK letters.',
'15-30 minutes': '15-30 phút',
'2 different options are provided here currently:': '2 different options are provided here currently:',
'2. Always use one box per letter and leave one box space to seperate words.': '2. Always use one box per letter and leave one box space to seperate words.',
'2x4 Car': '2x4 Car',
'30-60 minutes': '30-60 minutes',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 ngày',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'tài liệu tham khảo như file, URL hay thông tin liên hệ đế xác nhận dữ liệu này.Bạn có thể gõ một vài ký tự đầu của tên tài liệu để kết nối tới tài liệu có sẵn',
'A Warehouse is a physical place to store items.': 'A Warehouse is a physical place to store items.',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': 'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'A file downloaded from a GPS containing a series of geographic points in XML format.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'A library of digital resources, such as photos, documents and reports': 'A library of digital resources, such as photos, documents and reports',
'A place within a Site like a Shelf, room, bin number etc.': 'Một nơi trên site như số ngăn ,số phòng,số thùng v.v',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'Upload ảnh chụp vị trí hoặc tài liệu bổ sung chứa thông tin bổ sung về trang web tại đây',
'A survey series with id %s does not exist. Please go back and create one.': 'A survey series with id %s does not exist. Please go back and create one.',
'ABOUT': 'ABOUT',
'ABOUT THIS MODULE': 'Giới thiệu Module này',
'ACCESS DATA': 'ACCESS DATA',
'ANY': 'ANY',
'API is documented here': 'API is documented here',
'Ability to Fill Out Surveys': 'Ability to Fill Out Surveys',
'Ability to customize the list of details tracked at a Shelter': 'Ability to customize the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Khả năng tùy chỉnh danh sách nguồn nhân lực theo dõi tại nơi cư trú',
'Ability to customize the list of important facilities needed at a Shelter': 'Khả năng tùy chỉnh danh sách các điều kiện quan trọng cần thiết tại một cơ sở cư trú',
'Ability to track partial fulfillment of the request': 'Ability to track partial fulfillment of the request',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Ability to view Results of Completed and/or partially filled out Surveys',
'About': 'About',
'About Sahana': 'About Sahana',
'About Sahana Eden': 'About Sahana Eden',
'About this module': 'About this module',
'Access denied': 'Access denied',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Account registered, however registration is still pending approval - please wait until confirmation received.',
'Acronym': 'Acronym',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organization's name, eg. IFRC.",
'Actionable': 'Actionable',
'Actionable by all targeted recipients': 'Actionable by all targeted recipients',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>',
'Actioned?': 'Actioned?',
'Active Problems': 'Active Problems',
'Activities': 'Activities',
'Activities matching Assessments:': 'Activities matching Assessments:',
'Activities of boys 13-17yrs before disaster': 'Activities of boys 13-17yrs before disaster',
'Activities of boys 13-17yrs now': 'Activities of boys 13-17yrs now',
'Activities of boys <12yrs before disaster': 'Activities of boys <12yrs before disaster',
'Activities of boys <12yrs now': 'Activities of boys <12yrs now',
'Activities of girls 13-17yrs before disaster': 'Activities of girls 13-17yrs before disaster',
'Activities of girls 13-17yrs now': 'Activities of girls 13-17yrs now',
'Activities of girls <12yrs before disaster': 'Activities of girls <12yrs before disaster',
'Activities of girls <12yrs now': 'Activities of girls <12yrs now',
'Activities:': 'Activities:',
'Activity': 'Activity',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': 'Chi tiết Hoạt động',
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type': 'Activity Type',
'Activity Updated': 'Activity Updated',
'Add': 'Thêm',
'Add Activity': 'Thêm hoạt động',
'Add Activity Report': 'Add Activity Report',
'Add Activity Type': 'Thêm loại hoạt động',
'Add Address': 'Add Address',
'Add Aid Request': 'Thêm yêu cầu cứu trợ',
'Add Assessment': 'Add Assessment',
'Add Assessment Summary': 'Add Assessment Summary',
'Add Baseline': 'Add Baseline',
'Add Baseline Type': 'Add Baseline Type',
'Add Bed Type': 'Add Bed Type',
'Add Bin Type': 'Add Bin Type',
'Add Bins': 'Thêm Bin',
'Add Budget': 'Add Budget',
'Add Bundle': 'Add Bundle',
'Add Catalog': 'Thêm Catalog',
'Add Catalog Item': 'Add Catalog Item',
'Add Catalog.': 'Add Catalog.',
'Add Category': 'Thêm nhóm',
'Add Category<>Sub-Category<>Catalog Relation': 'Add Category<>Sub-Category<>Catalog Relation',
'Add Cholera Treatment Capability Information': 'Add Cholera Treatment Capability Information',
'Add Cluster': 'Thêm cụm',
'Add Cluster Subsector': 'Add Cluster Subsector',
'Add Config': 'Add Config',
'Add Contact': 'Thêm thông tin liên lạc',
'Add Contact Information': 'Thêm thông tin liên lạc',
'Add Disaster Victims': 'Add Disaster Victims',
'Add Distribution': 'Add Distribution',
'Add Distribution.': 'Add Distribution.',
'Add Donor': 'Thêm tên người quyên góp vào danh sách',
'Add Feature Class': 'Add Feature Class',
'Add Feature Layer': 'Thêm lớp đặc tính',
'Add Flood Report': 'Add Flood Report',
'Add Group': 'Thêm nhóm',
'Add Group Member': 'Add Group Member',
'Add Hospital': 'Thêm Bệnh viện',
'Add Identification Report': 'Add Identification Report',
'Add Identity': 'Thêm thông tin định danh',
'Add Image': 'Thêm ảnh',
'Add Impact': 'Add Impact',
'Add Impact Type': 'Add Impact Type',
'Add Incident': 'Add Incident',
'Add Incident Report': 'Thêm Báo cáo sự việc',
'Add Inventory Item': 'Add Inventory Item',
'Add Inventory Store': 'Add Inventory Store',
'Add Item': 'Add Item',
'Add Item (s)': 'Add Item (s)',
'Add Item Catalog': 'Add Item Catalog',
'Add Item Catalog ': 'Add Item Catalog ',
'Add Item Catalog Category ': 'Thêm tiêu chí cho catalog hàng hóa',
'Add Item Category': 'Add Item Category',
'Add Item Packet': 'Add Item Packet',
'Add Item Sub-Category': 'Add Item Sub-Category',
'Add Key': 'Add Key',
'Add Kit': 'Thêm Kit',
'Add Layer': 'Thêm lớp',
'Add Location': 'Add Location',
'Add Locations': 'Thêm địa điểm mới',
'Add Log Entry': 'Add Log Entry',
'Add Member': 'Thêm thành viên',
'Add Membership': 'Add Membership',
'Add Message': 'Thêm Tin nhắn',
'Add Need': 'Add Need',
'Add Need Type': 'Add Need Type',
'Add New': 'Add New',
'Add New Activity': 'Add New Activity',
'Add New Address': 'Thêm Địa chỉ mới',
'Add New Aid Request': 'Thêm yêu cầu cứu trợ mới',
'Add New Assessment': 'Add New Assessment',
'Add New Assessment Summary': 'Add New Assessment Summary',
'Add New Baseline': 'Add New Baseline',
'Add New Baseline Type': 'Add New Baseline Type',
'Add New Bin': 'Add New Bin',
'Add New Bin Type': 'Add New Bin Type',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Catalog Item': 'Add New Catalog Item',
'Add New Cluster': 'Thêm cụm mới',
'Add New Cluster Subsector': 'Add New Cluster Subsector',
'Add New Config': 'Thêm cấu hình mới',
'Add New Contact': 'Thêm đầu mối liên lạc mới',
'Add New Distribution': 'Add New Distribution',
'Add New Distribution Item': 'Add New Distribution Item',
'Add New Document': 'Thêm Tài liệu mới',
'Add New Donor': 'Thêm Người quyên góp',
'Add New Entry': 'Add New Entry',
'Add New Feature Class': 'Add New Feature Class',
'Add New Feature Layer': 'Add New Feature Layer',
'Add New Flood Report': 'Thêm báo cáo lũ lụt mới',
'Add New Group': 'Thêm nhóm mới',
'Add New Hospital': 'Thêm Bệnh viện mới',
'Add New Identity': 'Thêm thông tin nhận dạng mới',
'Add New Image': 'Thêm ảnh mới',
'Add New Impact': 'Add New Impact',
'Add New Impact Type': 'Add New Impact Type',
'Add New Incident': 'Thêm sự kiện',
'Add New Incident Report': 'Add New Incident Report',
'Add New Inventory Item': 'Add New Inventory Item',
'Add New Inventory Store': 'Add New Inventory Store',
'Add New Item': 'Add New Item',
'Add New Item Catalog': 'Add New Item Catalog',
'Add New Item Catalog Category': 'Add New Item Catalog Category',
'Add New Item Category': 'Add New Item Category',
'Add New Item Packet': 'Add New Item Packet',
'Add New Item Sub-Category': 'Add New Item Sub-Category',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Key': 'Thêm Key mới ',
'Add New Kit': 'Thêm Kit mới',
'Add New Layer': 'Add New Layer',
'Add New Location': 'Add New Location',
'Add New Log Entry': 'Add New Log Entry',
'Add New Marker': 'Add New Marker',
'Add New Member': 'Thêm thành viên mới',
'Add New Membership': 'Add New Membership',
'Add New Metadata': 'Add New Metadata',
'Add New Need': 'Add New Need',
'Add New Need Type': 'Add New Need Type',
'Add New Office': 'Thêm văn phòng mới',
'Add New Organization': 'Thêm một tô chức mới',
'Add New Photo': 'Add New Photo',
'Add New Position': 'Add New Position',
'Add New Problem': 'Thêm vấn đề mới',
'Add New Project': 'Add New Project',
'Add New Projection': 'Add New Projection',
'Add New Rapid Assessment': 'Add New Rapid Assessment',
'Add New Received Item': 'Add New Received Item',
'Add New Record': 'Add New Record',
'Add New Report': 'Thêm báo cáo mới',
'Add New Request': 'Thêm yêu cầu mới',
'Add New Request Item': 'Thêm yêu cầu hàng hóa mới',
'Add New Resource': 'Thêm nguồn lực mới',
'Add New Response': 'Thêm phản hồi mới',
'Add New River': 'Add New River',
'Add New Role': 'Thêm vai trò mới',
'Add New Role to User': 'Gán vai trò mới cho người dùng',
'Add New Sector': 'Add New Sector',
'Add New Sent Item': 'Add New Sent Item',
'Add New Setting': 'Add New Setting',
'Add New Shelter': 'Thêm Nơi cư trú mới',
'Add New Shelter Service': 'Add New Shelter Service',
'Add New Shelter Type': 'Add New Shelter Type',
'Add New Site': 'Add New Site',
'Add New Skill': 'Thêm kỹ năng mới',
'Add New Skill Type': 'Add New Skill Type',
'Add New Solution': 'Add New Solution',
'Add New Staff': 'Add New Staff',
'Add New Staff Type': 'Add New Staff Type',
'Add New Storage Location': 'Thêm Vị trí kho lưu trữ mới',
'Add New Survey Answer': 'Add New Survey Answer',
'Add New Survey Question': 'Add New Survey Question',
'Add New Survey Section': 'Add New Survey Section',
'Add New Survey Series': 'Add New Survey Series',
'Add New Survey Template': 'Thêm mẫu khảo sát mới',
'Add New Task': 'Thêm một công việc mới',
'Add New Team': 'Thêm đội mới',
'Add New Theme': 'Add New Theme',
'Add New Ticket': 'Add New Ticket',
'Add New Track': 'Add New Track',
'Add New Unit': 'Thêm đơn vị mới',
'Add New User': 'Thêm người dùng mới',
'Add New User to Role': 'Add New User to Role',
'Add New Warehouse': 'Add New Warehouse',
'Add New Warehouse Item': 'Add New Warehouse Item',
'Add Office': 'Thêm thông tin văn phòng',
'Add Organization': 'Thêm Tổ chức',
'Add Peer': 'Add Peer',
'Add Person': 'Thêm cá nhân',
'Add Personal Effects': 'Add Personal Effects',
'Add Photo': 'Add Photo',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Project': 'Thêm dự án',
'Add Projection': 'Add Projection',
'Add Question': 'Add Question',
'Add Rapid Assessment': 'Add Rapid Assessment',
'Add Recipient': 'Thêm người nhận viện trợ',
'Add Recipient Site': 'Add Recipient Site',
'Add Recipient Site.': 'Add Recipient Site.',
'Add Record': 'Add Record',
'Add Recovery Report': 'Add Recovery Report',
'Add Reference Document': 'Add Reference Document',
'Add Report': 'Add Report',
'Add Request': 'Thêm yêu cầu',
'Add Request Detail': 'thêm chi tiết yêu cầu',
'Add Request Item': 'Thêm yêu cầu hàng hóa',
'Add Resource': 'Thêm tại nguyên',
'Add Response': 'Add Response',
'Add River': 'Add River',
'Add Role': 'Add Role',
'Add Section': 'Add Section',
'Add Sector': 'Thêm lĩnh vực',
'Add Sender Organization': 'Thêm tổ chức gửi',
'Add Sender Site': 'Add Sender Site',
'Add Sender Site.': 'Add Sender Site.',
'Add Service Profile': 'Add Service Profile',
'Add Setting': 'Add Setting',
'Add Shelter': 'Add Shelter',
'Add Shelter Service': 'Add Shelter Service',
'Add Shelter Type': 'Add Shelter Type',
'Add Shipment Transit Log': 'Add Shipment Transit Log',
'Add Shipment/Way Bills': 'Add Shipment/Way Bills',
'Add Site': 'Add Site',
'Add Skill': 'Thêm kỹ năng',
'Add Skill Type': 'Thêm loại kỹ năng',
'Add Skill Types': 'Thêm loại kỹ năng',
'Add Solution': 'Add Solution',
'Add Staff': 'Add Staff',
'Add Staff Type': 'Add Staff Type',
'Add Status': 'Add Status',
'Add Storage Bin ': 'Add Storage Bin ',
'Add Storage Bin Type': 'Add Storage Bin Type',
'Add Storage Location': 'Add Storage Location',
'Add Storage Location ': 'Add Storage Location ',
'Add Sub-Category': 'Thêm danh mục cấp dưới',
'Add Subscription': 'Add Subscription',
'Add Survey Answer': 'Thêm trả lời khảo sát',
'Add Survey Question': 'Thêm câu hỏi khảo sát',
'Add Survey Section': 'Add Survey Section',
'Add Survey Series': 'Add Survey Series',
'Add Survey Template': 'Thêm mẫu khảo sát',
'Add Task': 'Add Task',
'Add Team': 'Thêm đội',
'Add Theme': 'Add Theme',
'Add Ticket': 'Add Ticket',
'Add Unit': 'Thêm đơn vị',
'Add User': 'Thêm người dùng',
'Add Volunteer': 'Add Volunteer',
'Add Volunteer Registration': 'Thêm Đăng ký tình nguyện viên',
'Add Warehouse': 'Add Warehouse',
'Add Warehouse Item': 'Add Warehouse Item',
'Add a Person': 'Add a Person',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.',
'Add a Volunteer': 'Add a Volunteer',
'Add a new Site from where the Item is being sent.': 'Thêm Site nơi gửi hàng hóa đến ',
'Add a new Site where the Item is being sent to.': 'Add a new Site where the Item is being sent to.',
'Add an Photo.': 'Add an Photo.',
'Add main Item Category.': 'Add main Item Category.',
'Add main Item Sub-Category.': 'Add main Item Sub-Category.',
'Add new Group': 'Add new Group',
'Add new Individual': 'Add new Individual',
'Add new position.': 'Thêm địa điểm mới',
'Add new project.': 'Thêm dự án mới',
'Add new staff role.': 'Thêm vai trò nhân viên mới',
'Add the Storage Bin Type.': 'Add the Storage Bin Type.',
'Add the Storage Location where this bin is located.': 'Add the Storage Location where this bin is located.',
'Add the Storage Location where this this Bin belongs to.': 'Thêm vị trí kho lưu trữ chứa Bin này',
'Add the main Warehouse/Site information where this Bin belongs to.': 'Add the main Warehouse/Site information where this Bin belongs to.',
'Add the main Warehouse/Site information where this Item is to be added.': 'Thêm thông tin Nhà kho/Site chứa hàng hóa đã được nhập thông tin',
'Add the main Warehouse/Site information where this Storage location is.': 'Add the main Warehouse/Site information where this Storage location is.',
'Add the unit of measure if it doesnt exists already.': 'Add the unit of measure if it doesnt exists already.',
'Add to Bundle': 'Add to Bundle',
'Add to Catalog': 'Add to Catalog',
'Add to budget': 'Add to budget',
'Add/Edit/Remove Layers': 'Thêm/Sửa/Xóa các lớp',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Additional Comments': 'Additional Comments',
"Additional quantity quantifier – e.g. '4x5'.": "Additional quantity quantifier – e.g. '4x5'.",
'Address': 'Địa chỉ',
'Address Details': 'Address Details',
'Address Type': 'Loại địa chỉ',
'Address added': 'Địa chỉ đã được thêm',
'Address deleted': 'Đã xóa địa chỉ',
'Address updated': 'Address updated',
'Addresses': 'Các địa chỉ',
'Adequate': 'Adequate',
'Adequate food and water available': 'Adequate food and water available',
'Adjust Item(s) Quantity': 'Adjust Item(s) Quantity',
'Adjust Items due to Theft/Loss': 'Adjust Items due to Theft/Loss',
'Admin': 'Quản trị viên',
'Admin Email': 'Email của quản trị viên',
'Admin Name': 'Tên quản trị viên',
'Admin Tel': 'Số điện thoại của Quản trị viên',
'Administration': 'Quản trị',
'Administrator': 'Quản trị viên',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adolescent participating in coping activities': 'Adolescent participating in coping activities',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Bệnh nhân tâm thần',
'Adult female': 'Nữ giới',
'Adult male': 'Adult male',
'Adults in prisons': 'Adults in prisons',
'Advanced Bin Search': 'Advanced Bin Search',
'Advanced Catalog Search': 'Tìm kiếm danh mục nâng cao',
'Advanced Category Search': 'Tìm kiếm danh mục nâng cao',
'Advanced Item Search': 'Advanced Item Search',
'Advanced Location Search': 'Tìm kiếm vị trí nâng cao',
'Advanced Site Search': 'Advanced Site Search',
'Advanced Sub-Category Search': 'Advanced Sub-Category Search',
'Advanced Unit Search': 'Advanced Unit Search',
'Advanced:': 'Advanced:',
'Advisory': 'Advisory',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.',
'Age Group': 'Nhóm tuổi',
'Age group': 'Age group',
'Age group does not match actual age.': 'Nhóm tuổi không phù hợp với tuổi thật',
'Aggravating factors': 'Aggravating factors',
'Aggregate Items': 'Aggregate Items',
'Agriculture': 'Agriculture',
'Aid Request': 'Yêu cầu cứu trợ',
'Aid Request Details': 'Chi tiết yêu cầu cứu trợ',
'Aid Request added': 'Đã thêm yêu cầu viện trợ',
'Aid Request deleted': 'Đã xóa yêu cầu cứu trợ',
'Aid Request updated': 'Đã cập nhật Yêu cầu cứu trợ',
'Aid Requests': 'yêu cầu cứu trợ',
'Air Transport Service': 'Air Transport Service',
'Air tajin': 'Air tajin',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Airport Closure': 'Đóng cửa sân bay',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert': 'Alert',
'All': 'All',
'All Inbound & Outbound Messages are stored here': 'All Inbound & Outbound Messages are stored here',
'All Locations': 'All Locations',
'All Requested Items': 'Hàng hóa được yêu cầu',
'All Resources': 'All Resources',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.',
'Allowed to push': 'Cho phép bấm nút',
'Allows a Budget to be drawn up': 'Allows a Budget to be drawn up',
'Allows authorized users to control which layers are available to the situation map.': 'Cho phép người dùng đã đăng nhập kiểm soát layer nào phù hợp với bản đồ tình huống',
'Alternative infant nutrition in use': 'Alternative infant nutrition in use',
'Alternative places for studying': 'Alternative places for studying',
'Alternative places for studying available': 'Alternative places for studying available',
'Ambulance Service': 'Ambulance Service',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.',
'Analysis of Completed Surveys': 'Analysis of Completed Surveys',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Animals': 'Animals',
'Answer Choices (One Per Line)': 'Chọn câu trả lời',
'Anthropolgy': 'Anthropolgy',
'Antibiotics available': 'Antibiotics available',
'Antibiotics needed per 24h': 'Antibiotics needed per 24h',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Thông tin có sẵn trong file như Timestamp,Tác giả, Kinh độ, Vĩ độ sẽ được đọc tự động',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Appropriate clothing available': 'Appropriate clothing available',
'Appropriate cooking equipment/materials in HH': 'Appropriate cooking equipment/materials in HH',
'Approx. number of cases/48h': 'Approx. number of cases/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Approximately how many children under 5 with diarrhea in the past 48 hours?',
'Archive not Delete': 'Archive not Delete',
'Arctic Outflow': 'Arctic Outflow',
'Are basic medical supplies available for health services since the disaster?': 'Are basic medical supplies available for health services since the disaster?',
'Are breast milk substitutes being used here since the disaster?': 'Are breast milk substitutes being used here since the disaster?',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': 'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?',
'Are the chronically ill receiving sufficient care and assistance?': 'Are the chronically ill receiving sufficient care and assistance?',
'Are there adults living in prisons in this area?': 'Are there adults living in prisons in this area?',
'Are there alternative places for studying?': 'Are there alternative places for studying?',
'Are there cases of diarrhea among children under the age of 5?': 'Are there cases of diarrhea among children under the age of 5?',
'Are there children living in adult prisons in this area?': 'Are there children living in adult prisons in this area?',
'Are there children living in boarding schools in this area?': 'Are there children living in boarding schools in this area?',
'Are there children living in homes for disabled children in this area?': 'Are there children living in homes for disabled children in this area?',
'Are there children living in juvenile detention in this area?': 'Are there children living in juvenile detention in this area?',
'Are there children living in orphanages in this area?': 'Are there children living in orphanages in this area?',
'Are there children with chronical illnesses in your community?': 'Are there children with chronical illnesses in your community?',
'Are there health services functioning for the community since the disaster?': 'Are there health services functioning for the community since the disaster?',
'Are there older people living in care homes in this area?': 'Are there older people living in care homes in this area?',
'Are there older people with chronical illnesses in your community?': 'Are there older people with chronical illnesses in your community?',
'Are there people with chronical illnesses in your community?': 'Are there people with chronical illnesses in your community?',
'Are there separate latrines for women and men available?': 'Are there separate latrines for women and men available?',
'Are there staff present and caring for the residents in these institutions?': 'Are there staff present and caring for the residents in these institutions?',
'Area': 'Area',
'Assessment': 'Assessment',
'Assessment Details': 'Assessment Details',
'Assessment Reported': 'Assessment Reported',
'Assessment Summaries': 'Assessment Summaries',
'Assessment Summary Details': 'Assessment Summary Details',
'Assessment Summary added': 'Assessment Summary added',
'Assessment Summary deleted': 'Assessment Summary deleted',
'Assessment Summary updated': 'Assessment Summary updated',
'Assessment Type': 'Assessment Type',
'Assessment added': 'Assessment added',
'Assessment deleted': 'Assessment deleted',
'Assessment updated': 'Đã cập nhật Trị giá tính thuế',
'Assessments': 'Assessments',
'Assessments Needs vs. Activities': 'Assessments Needs vs. Activities',
'Assessments and Activities': 'Assessments and Activities',
'Assessments are structured reports done by Professional Organizations': 'Assessments are structured reports done by Professional Organizations',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments',
'Assessments:': 'Assessments:',
'Assessor': 'Assessor',
'Assign Storage Location': 'Assign Storage Location',
'Assign to Org.': 'Assign to Org.',
'Assigned': 'Assigned',
'Assigned To': 'Assigned To',
'Assigned to': 'Assigned to',
'Assistance for immediate repair/reconstruction of houses': 'Assistance for immediate repair/reconstruction of houses',
'Assistant': 'Assistant',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attend to information sources as described in <instruction>': 'Attend to information sources as described in <instruction>',
'Attribution': 'Attribution',
'Audit Read': 'Audit Read',
'Audit Write': 'Audit Write',
"Authenticate system's Twitter account": "Authenticate system's Twitter account",
'Author': 'Author',
'Automotive': 'Automotive',
'Available Beds': 'Available Beds',
'Available Messages': 'Available Messages',
'Available Records': 'Available Records',
'Available databases and tables': 'Available databases and tables',
'Available from': 'Available from',
'Available in Viewer?': 'Available in Viewer?',
'Available until': 'Sẵn sàng cho đến khi',
'Availablity': 'Availablity',
'Avalanche': 'Avalanche',
'Avoid the subject event as per the <instruction>': 'Avoid the subject event as per the <instruction>',
'Babies who are not being breastfed, what are they being fed on?': 'Babies who are not being breastfed, what are they being fed on?',
'Baby And Child Care': 'Chăm sóc trẻ em',
'Background Colour': 'Background Colour',
'Background Colour for Text blocks': 'Background Colour for Text blocks',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Balochi': 'Balochi',
'Banana': 'Banana',
'Bank/micro finance': 'Tài chính Ngân hàng',
'Base Layer?': 'Base Layer?',
'Base Unit': 'Đơn vị cơ sở',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline Type': 'Baseline Type',
'Baseline Type Details': 'Baseline Type Details',
'Baseline Type added': 'Baseline Type added',
'Baseline Type deleted': 'Baseline Type deleted',
'Baseline Type updated': 'Baseline Type updated',
'Baseline Types': 'Baseline Types',
'Baseline added': 'Baseline added',
'Baseline deleted': 'Baseline deleted',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Baseline updated': 'Baseline updated',
'Baselines': 'Baselines',
'Baselines Details': 'Baselines Details',
'Basic Assessment': 'Basic Assessment',
'Basic Assessment Reported': 'Basic Assessment Reported',
'Basic Details': 'Basic Details',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Thông tin cơ bản về các yêu cầu và quyên góp như thể loại, tên đơn vị, chi tiết liên lạc và tình trạng',
'Basic medical supplies available prior to disaster': 'Basic medical supplies available prior to disaster',
'Basic medical supplies available since disaster': 'Basic medical supplies available since disaster',
'Basic reports on the Shelter and drill-down by region': 'Báo cáo cơ bản về nơi cư trú và báo cáo chi tiết theo vùng',
'Baud': 'Bốt',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Bed Capacity': 'Bed Capacity',
'Bed Capacity per Unit': 'Bed Capacity per Unit',
'Bed Type': 'Loại Giường',
'Bed type already registered': 'Bed type already registered',
'Bedding materials available': 'Bedding materials available',
'Beneficiary Type': 'Beneficiary Type',
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'Biscuits',
'Blizzard': 'Blizzard',
'Blood Type (AB0)': 'Blood Type (AB0)',
'Blowing Snow': 'Gió tuyết đang thổi',
'Boat': 'Boat',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Body': 'Body',
'Body Recovery Reports': 'Body Recovery Reports',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Bomb': 'Bomb',
'Bomb Explosion': 'Nổ bom',
'Bomb Threat': 'Bomb Threat',
'Border Colour for Text blocks': 'Màu viền cho khối văn bản',
'Boys 13-18 yrs in affected area': 'Boys 13-18 yrs in affected area',
'Boys 13-18 yrs not attending school': 'Boys 13-18 yrs not attending school',
'Boys 6-12 yrs in affected area': 'Boys 6-12 yrs in affected area',
'Boys 6-12 yrs not attending school': 'Boys 6-12 yrs not attending school',
'Breast milk substitutes in use since disaster': 'Breast milk substitutes in use since disaster',
'Breast milk substitutes used prior to disaster': 'Breast milk substitutes used prior to disaster',
'Bricks': 'Bricks',
'Bridge Closed': 'Bridge Closed',
'Bucket': 'Bucket',
'Buddhist': 'Người theo đạo Phật',
'Budget': 'Ngân sách',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budgeting Module': 'Budgeting Module',
'Budgets': 'Ngân sách',
'Buffer': 'Buffer',
'Building Aide': 'Building Aide',
'Building Collapsed': 'Sập nhà',
'Built using the Template agreed by a group of NGOs working together as the': 'Built using the Template agreed by a group of NGOs working together as the',
'Bulk Uploader': 'Bulk Uploader',
'Bundle': 'Bundle',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Cập nhật Bundle',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundles': 'Bundles',
'Burn': 'Burn',
'Burn ICU': 'Burn ICU',
'Burned/charred': 'Burned/charred',
'Business damaged': 'Business damaged',
'By Warehouse': 'By Warehouse',
'CBA Women': 'CBA Women',
'CSS file %s not writable - unable to apply theme!': 'không viết được file CSS %s - không thể áp dụng chủ đề',
'Calculate': 'Tính toán',
'Camp': 'Camp',
'Camp Coordination/Management': 'Camp Coordination/Management',
'Can users register themselves for authenticated login access?': 'Can users register themselves for authenticated login access?',
"Can't import tweepy": "Can't import tweepy",
'Cancel': 'Cancel',
'Cancelled': 'Cancelled',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'Không xóa được khi đang có bản thu liên quan. Hãy xóa bản thu trước',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capacity (W x D X H)': 'Capacity (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Nắm bắt thông tin của các nạn nhân chịu ảnh hưởng của thiên tai (Khách du lịch, Gia đình...)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturing organizational information of a relief organization and all the projects they have in the region',
'Capturing the essential services each Volunteer is providing and where': 'Capturing the essential services each Volunteer is providing and where',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organization is providing and where',
'Cardiology': 'Bệnh tim mạch',
'Cash available to restart business': 'Cash available to restart business',
'Cassava': 'Cassava',
'Casual Labor': 'Nhân công thời vụ',
'Catalog': 'Catalog',
'Catalog Item': 'Catalog Item',
'Catalog Item added': 'Catalog Item added',
'Catalog Item deleted': 'Catalog Item deleted',
'Catalog Item updated': 'Catalog Item updated',
'Catalog Items': 'Catalog Items',
'Catalog Name': 'Catalog Name',
'Category': 'Category',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog Relation',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog Relation added',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog Relation deleted',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog Relation updated',
'Central point to record details on People': 'Central point to record details on People',
'Change Password': 'Change Password',
'Check for errors in the URL, maybe the address was mistyped.': 'Kiểm tra lỗi URL, có lẽ địa chỉ URL bị gõ sai.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Kiểm tra URL trỏ về thư mục hay trang web',
'Check outbox for the message status': 'Check outbox for the message status',
'Check to delete': 'Check to delete',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child': 'Child',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Trẻ em (dưới 2 tuổi)',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Children with chronical illnesses': 'Children with chronical illnesses',
'Chinese (Taiwan)': 'Chinese (Taiwan)',
'Cholera Treatment': 'Cholera Treatment',
'Cholera Treatment Capability': 'Cholera Treatment Capability',
'Cholera Treatment Center': 'Cholera Treatment Center',
'Cholera-Treatment-Center': 'Cholera-Treatment-Center',
'Choosing Skill and Resources of Volunteers': 'Choosing Skill and Resources of Volunteers',
'Christian': 'Christian',
'Church': 'Church',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Hoàn cảnh mất tích, những nhân chứng nhìn thấy lần gần đây nhất nạn nhân còn sống',
'City': 'City',
'Civil Emergency': 'Civil Emergency',
'Clear Selection': 'Clear Selection',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.",
'Click on the link ': 'Click on the link ',
'Client IP': 'Client IP',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close map': 'Close map',
'Closed': 'Đã đóng',
'Closure': 'Closure',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Đã thêm cụm',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'Mã',
'Cold Wave': 'Cold Wave',
'Collective center': 'Collective center',
'Colour for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Colour of Buttons when hovering': 'Colour of Buttons when hovering',
'Colour of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Colour of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Colour of dropdown menus': 'Colour of dropdown menus',
'Colour of selected Input fields': 'Màu của trường đã được chọn',
'Colour of selected menu items': 'Colour of selected menu items',
'Column Choices (One Per Line': 'Chọn cột',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'Bình luận',
'Commiting a changed spreadsheet to the database': 'Commiting a changed spreadsheet to the database',
'Communication problems': 'Communication problems',
'Community Centre': 'Community Centre',
'Community Health Center': 'Trung tâm sức khỏe cộng đồng',
'Community Member': 'Thành viên cộng đồng',
'Complete Unit Label for e.g. meter for m.': 'hoàn thành các bản đơn vị, ví dụ đơn vị của mét là m',
'Completed': 'Completed',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Config': 'Tùy chỉnh',
'Config added': 'Cấu hình đã được thêm',
'Config deleted': 'Config deleted',
'Config updated': 'Cập nhật tùy chỉnh',
'Configs': 'Configs',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Confirmed': 'Confirmed',
'Confirmed Incidents': 'Confirmed Incidents',
'Conflict Details': 'Conflict Details',
'Conflict Resolution': 'Conflict Resolution',
'Consumable': 'Consumable',
'Contact': 'Contact',
'Contact Data': 'Dữ liệu liên lạc',
'Contact Details': 'Contact Details',
'Contact Information': 'Contact Information',
'Contact Method': 'Contact Method',
'Contact Person': 'Contact Person',
'Contact details': 'Contact details',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact us': 'Contact us',
'Contacts': 'Contacts',
'Contents': 'Contents',
'Contradictory values!': 'Contradictory values!',
'Contributor': 'Người đóng góp',
'Conversion Tool': 'Conversion Tool',
'Cooking NFIs': 'Cooking NFIs',
'Cooking Oil': 'Cooking Oil',
'Coordinate Conversion': 'Coordinate Conversion',
'Copy': 'Copy',
'Copy any data from the one to be deleted into the one to keep': 'Copy any data from the one to be deleted into the one to keep',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
"Couldn't import tweepy library": "Couldn't import tweepy library",
'Country': 'Country',
'Country of Residence': 'Country of Residence',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Import Job': 'Create Import Job',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create New Import Job': 'Create New Import Job',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Request': 'Khởi tạo yêu cầu',
'Create Task': 'Create Task',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Create, enter, and manage surveys.': 'Create, enter, and manage surveys.',
'Creation of Surveys': 'Creation of Surveys',
'Crime': 'Tội phạm',
'Criteria': 'Criteria',
'Currency': 'Currency',
'Current Group Members': 'Nhóm thành viên hiện tại',
'Current Identities': 'Current Identities',
'Current Location': 'Current Location',
'Current Log Entries': 'Current Log Entries',
'Current Memberships': 'Thành viên hiện tại',
'Current Registrations': 'Current Registrations',
'Current Status': 'Current Status',
'Current Team Members': 'Current Team Members',
'Current Twitter account': 'Tài khoản Twitter hiện tại',
'Current greatest needs of vulnerable groups': 'Current greatest needs of vulnerable groups',
'Current main income sources': 'Current main income sources',
'Current major expenses': 'Current major expenses',
'Current number of patients': 'Current number of patients',
'Current problems, categories': 'Current problems, categories',
'Current problems, details': 'Current problems, details',
'Current request': 'Yêu cầu hiện tại',
'Current response': 'Current response',
'Current session': 'Current session',
'Current type of health problems, adults': 'Current type of health problems, adults',
'Current type of health problems, children': 'Current type of health problems, children',
'Current type of source for drinking water': 'Current type of source for drinking water',
'Current type of source for sanitary water': 'Current type of source for sanitary water',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Custom Database Resource (e.g., anything defined as a resource in Sahana)',
'Customisable category of aid': 'Các tiêu chí cứu trợ có thể tùy chỉnh',
'DECISION': 'DECISION',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'Daily': 'Hàng ngày',
'Dam Overflow': 'Tràn đập',
'Dangerous Person': 'Người nguy hiểm',
'Data uploaded': 'Đã cập nhật dữ liệu',
'Database': 'Database',
'Date': 'Date',
'Date & Time': 'Date & Time',
'Date Requested': 'Date Requested',
'Date Required': 'Date Required',
'Date and Time': 'Date and Time',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Ngày giờ nhận hàng hóa.Hiển thị thời gian theo mặc định nhưng vẫn có thể chỉnh sửa',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date of Birth': 'Date of Birth',
'Date of Latest Information on Beneficiaries Reached': 'Date of Latest Information on Beneficiaries Reached',
'Date of Report': 'Ngày báo cáo',
'Date/Time': 'Ngày/Giờ',
'Date/Time of Find': 'Ngày giờ tìm kiếm',
'Date/Time of disappearance': 'Date/Time of disappearance',
'De-duplicator': 'De-duplicator',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Báo cáo thiệt hại về người',
'Deaths in the past 24h': 'Deaths in the past 24h',
'Deaths/24hrs': 'Số người chết/24h',
'Debug': 'Debug',
'Decimal Degrees': 'Độ thập phân',
'Decomposed': 'Decomposed',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default Marker': 'Default Marker',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default synchronization policy': 'Chính sách đồng bộ hóa mặc định',
'Defaults': 'Defaults',
'Defaults updated': 'Defaults updated',
'Defecation area for animals': 'Defecation area for animals',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': 'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.',
'Defines the marker used for display & the attributes visible in the popup.': 'Defines the marker used for display & the attributes visible in the popup.',
'Degrees must be a number between -180 and 180': 'Degrees must be a number between -180 and 180',
'Dehydration': 'Dehydration',
'Delete': 'Delete',
'Delete Aid Request': 'Xóa yêu cầu cứu trợ',
'Delete Assessment': 'Delete Assessment',
'Delete Assessment Summary': 'Delete Assessment Summary',
'Delete Baseline': 'Delete Baseline',
'Delete Baseline Type': 'Delete Baseline Type',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Catalog Item': 'Delete Catalog Item',
'Delete Cluster Subsector': 'Delete Cluster Subsector',
'Delete Config': 'Delete Config',
'Delete Distribution': 'Delete Distribution',
'Delete Distribution Item': 'Delete Distribution Item',
'Delete Document': 'Delete Document',
'Delete Donor': 'Delete Donor',
'Delete Entry': 'Delete Entry',
'Delete Feature Class': 'Delete Feature Class',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete Group': 'Delete Group',
'Delete Hospital': 'Xóa Bệnh viện',
'Delete Image': 'Delete Image',
'Delete Impact': 'Delete Impact',
'Delete Impact Type': 'Delete Impact Type',
'Delete Incident': 'Delete Incident',
'Delete Incident Report': 'Delete Incident Report',
'Delete Inventory Item': 'Delete Inventory Item',
'Delete Inventory Store': 'Xóa kho lưu trữ',
'Delete Item': 'Xóa mục',
'Delete Item Category': 'Delete Item Category',
'Delete Item Packet': 'Delete Item Packet',
'Delete Key': 'Delete Key',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Xóa Layer',
'Delete Location': 'Xóa Vị trí',
'Delete Marker': 'Delete Marker',
'Delete Membership': 'Delete Membership',
'Delete Message': 'Delete Message',
'Delete Metadata': 'Delete Metadata',
'Delete Need': 'Delete Need',
'Delete Need Type': 'Delete Need Type',
'Delete Office': 'Delete Office',
'Delete Old': 'Delete Old',
'Delete Organization': 'Delete Organization',
'Delete Peer': 'Delete Peer',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Rapid Assessment': 'Delete Rapid Assessment',
'Delete Received Item': 'Delete Received Item',
'Delete Received Shipment': 'Delete Received Shipment',
'Delete Record': 'Delete Record',
'Delete Recovery Report': 'Delete Recovery Report',
'Delete Report': 'Delete Report',
'Delete Request': 'Xóa yêu cầu',
'Delete Request Item': 'Xóa yêu cầu hàng hóa',
'Delete Resource': 'Delete Resource',
'Delete Section': 'Delete Section',
'Delete Sector': 'Delete Sector',
'Delete Sent Item': 'Delete Sent Item',
'Delete Sent Shipment': 'Delete Sent Shipment',
'Delete Service Profile': 'Delete Service Profile',
'Delete Setting': 'Delete Setting',
'Delete Skill': 'Delete Skill',
'Delete Skill Type': 'Delete Skill Type',
'Delete Staff Type': 'Delete Staff Type',
'Delete Status': 'Delete Status',
'Delete Subscription': 'Delete Subscription',
'Delete Survey Answer': 'Delete Survey Answer',
'Delete Survey Question': 'Xóa câu hỏi khảo sát',
'Delete Survey Section': 'Delete Survey Section',
'Delete Survey Series': 'Delete Survey Series',
'Delete Survey Template': 'Xóa mẫu khảo sát',
'Delete Unit': 'Xóa đơn vị',
'Delete User': 'Delete User',
'Delete Volunteer': 'Delete Volunteer',
'Delete Warehouse': 'Delete Warehouse',
'Delete Warehouse Item': 'Delete Warehouse Item',
'Delete from Server?': 'Delete from Server?',
'Delivered': 'Delivered',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic': 'Ngành nhân khẩu học',
'Demonstrations': 'Biểu tình',
'Dental Examination': 'Khám nha khoa',
'Dental Profile': 'Dental Profile',
'Department/Unit Name': 'Department/Unit Name',
'Deployment': 'Deployment',
'Describe the condition of the roads to your hospital.': 'Mô tả tình trạng các con đường tới bệnh viện.',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Describe the procedure which this record relates to (e.g. "medical examination")',
'Description': 'Mô tả',
'Description of Bin Type': 'Description of Bin Type',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Mô tả khu vực defecation',
'Description of drinking water source': 'Description of drinking water source',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Descriptive Text (e.g., Prose, etc)': 'Descriptive Text (e.g., Prose, etc)',
'Designated for': 'Designated for',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'Điểm đích',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": "Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.",
'Details': 'Details',
'Dialysis': 'Dialysis',
'Diarrhea': 'Diarrhea',
'Diarrhea among children under 5': 'Diarrhea among children under 5',
'Dignitary Visit': 'Dignitary Visit',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Direction': 'Hướng',
'Disabled': 'Disabled',
'Disabled participating in coping activities': 'Disabled participating in coping activities',
'Disabled?': 'Disabled?',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster Victim Registry': 'Disaster Victim Registry',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discussion Forum': 'Discussion Forum',
'Discussion Forum on item': 'Discussion Forum on item',
'Disease vectors': 'Disease vectors',
'Dispatch': 'Gửi hàng cứu trợ',
'Dispatch Items': 'Dispatch Items',
'Dispensary': 'Y tế dự phòng',
'Displaced': 'Displaced',
'Displaced Populations': 'Displaced Populations',
'Display Polygons?': 'Display Polygons?',
'Display Routes?': 'Display Routes?',
'Display Tracks?': 'Display Tracks?',
'Display Waypoints?': 'Display Waypoints?',
'Dispose': 'Dispose',
'Dispose Expired/Unusable Items': 'Dispose Expired/Unusable Items',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance between latrines and temporary shelter in meters': 'Distance between latrines and temporary shelter in meters',
'Distance between shelter and latrines': 'Distance between shelter and latrines',
'Distance(Kms)': 'Distance(Kms)',
'Distribution': 'Distribution',
'Distribution Details': 'Distribution Details',
'Distribution Item': 'Hàng hóa đóng góp',
'Distribution Item Details': 'Distribution Item Details',
'Distribution Item added': 'Distribution Item added',
'Distribution Item deleted': 'Distribution Item deleted',
'Distribution Item updated': 'Distribution Item updated',
'Distribution Items': 'Distribution Items',
'Distribution added': 'Distribution added',
'Distribution deleted': 'Distribution deleted',
'Distribution groups': 'Distribution groups',
'Distribution updated': 'Distribution updated',
'Distributions': 'Distributions',
'District': 'Quận',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': 'Do households each have at least 2 containers (10-20 litres each) to hold water?',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Do households have bedding materials available (tarps, plastic mats, blankets)?',
'Do households have household water storage containers?': 'Do households have household water storage containers?',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': 'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': 'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do women and girls have easy access to sanitary materials?': 'Do women and girls have easy access to sanitary materials?',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do you have access to cash to restart your business?': 'Do you have access to cash to restart your business?',
'Do you know of any incidents of violence?': 'Do you know of any incidents of violence?',
'Do you know of children living on their own (without adults)?': 'Do you know of children living on their own (without adults)?',
'Do you know of children separated from their parents or caregivers?': 'Do you know of children separated from their parents or caregivers?',
'Do you know of children that have been orphaned by the disaster?': 'Do you know of children that have been orphaned by the disaster?',
'Do you know of children that have been sent to safe places?': 'Do you know of children that have been sent to safe places?',
'Do you know of children that have disappeared without explanation in the period since the disaster?': 'Do you know of children that have disappeared without explanation in the period since the disaster?',
'Do you know of older people who are primary caregivers of children?': 'Do you know of older people who are primary caregivers of children?',
'Do you know of parents/caregivers missing children?': 'Do you know of parents/caregivers missing children?',
'Do you really want to delete these records?': 'Do you really want to delete these records?',
'Do you want to over-write the file metadata with new default values?': 'Bạn có muốn thay dữ liệu file bằng giá trị mặc định mới không?',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Document': 'Document',
'Document Details': 'Chi tiết văn bản',
'Document Scan': 'Document Scan',
'Document added': 'Đã thêm tài liệu',
'Document deleted': 'Document deleted',
'Document updated': 'Document updated',
'Documents': 'Documents',
'Documents and Photos': 'Documents and Photos',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Không làm gì (không có hoạt động theo kế hoạch',
'Dollars': 'Dollars',
'Domestic chores': 'Công việc nội trợ',
'Donation Phone #': 'Donation Phone #',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Đã thêm người quyên góp',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Đã cập nhật người quyên góp',
'Donors': 'Donors',
'Donors Report': 'Báo cáo về tình hình quyên góp',
'Door frame': 'Door frame',
'Draft': 'Bản nháp',
'Drainage': 'Drainage',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dug Well': 'Dug Well',
'Duplicate?': 'Duplicate?',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dwellings': 'Dwellings',
'E-mail': 'E-mail',
'EMS Reason': 'EMS Reason',
'EMS Status': 'Tình trạng EMS',
'EMS Status Reason': 'EMS Status Reason',
'EMS Traffic Status': 'EMS Traffic Status',
'ER Status': 'ER Status',
'ER Status Reason': 'ER Status Reason',
'Early Recovery': 'Early Recovery',
'Earthquake': 'Động đất',
'Easy access to sanitation items for women/girls': 'Easy access to sanitation items for women/girls',
'Edit': 'Edit',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Aid Request': 'Chỉnh sửa Yêu cầu cứu trợ',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Chỉnh sửa Đánh giá',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Baseline': 'Edit Baseline',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Catalog Item': 'Edit Catalog Item',
'Edit Category<>Sub-Category<>Catalog Relation': 'Edit Category<>Sub-Category<>Catalog Relation',
'Edit Cluster': 'Edit Cluster',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Config': 'Edit Config',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Chỉnh sửa thông tin liên lạc',
'Edit Contents': 'Edit Contents',
'Edit Defaults': 'Edit Defaults',
'Edit Description': 'Edit Description',
'Edit Details': 'Chỉnh sửa chi tiết',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Distribution': 'Chỉnh sửa Quyên góp',
'Edit Distribution Item': 'Edit Distribution Item',
'Edit Document': 'Chỉnh sửa tài liệu',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit Gateway Settings': 'Edit Gateway Settings',
'Edit Group': 'Edit Group',
'Edit Hospital': 'Chỉnh sửa Bệnh viện',
'Edit Identification Report': 'Chỉnh sửa báo cáo định dạng',
'Edit Identity': 'Edit Identity',
'Edit Image': 'Edit Image',
'Edit Image Details': 'Edit Image Details',
'Edit Impact': 'Edit Impact',
'Edit Impact Type': 'Edit Impact Type',
'Edit Incident': 'Chỉnh sửa Các sự việc xảy ra',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Inventory Store': 'Edit Inventory Store',
'Edit Item': 'Edit Item',
'Edit Item Catalog': 'Edit Item Catalog',
'Edit Item Catalog Categories': 'Chỉnh sửa danh mục hàng hóa',
'Edit Item Category': 'Edit Item Category',
'Edit Item Packet': 'Edit Item Packet',
'Edit Item Sub-Categories': 'Edit Item Sub-Categories',
'Edit Key': 'Chỉnh sửa Key',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Location': 'Edit Location',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Services': 'Chỉnh sửa dịch vụ bản đồ',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Messaging Settings': 'Edit Messaging Settings',
'Edit Metadata': 'Chỉnh sửa dữ liệu',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Need': 'Edit Need',
'Edit Need Type': 'Edit Need Type',
'Edit Office': 'Edit Office',
'Edit Options': 'Edit Options',
'Edit Organization': 'Edit Organization',
'Edit Parameters': 'Edit Parameters',
'Edit Peer Details': 'Chỉnh sửa chi tiết nhóm người',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Pledge': 'Edit Pledge',
'Edit Position': 'Edit Position',
'Edit Problem': 'Chỉnh sửa Vấn đề',
'Edit Project': 'Edit Project',
'Edit Projection': 'Edit Projection',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Received Item': 'Edit Received Item',
'Edit Received Shipment': 'Edit Received Shipment',
'Edit Record': 'Edit Record',
'Edit Recovery Details': 'Chỉnh sửa chi tiết khôi phục',
'Edit Registration': 'Edit Registration',
'Edit Registration Details': 'Edit Registration Details',
'Edit Report': 'Chỉnh sửa báo cáo',
'Edit Request': 'Edit Request',
'Edit Request Item': 'Chỉnh sửa yêu cầu hàng hóa',
'Edit Resource': 'Edit Resource',
'Edit Response': 'Chỉnh sửa phản hồi',
'Edit River': 'Edit River',
'Edit Role': 'Chỉnh sửa Vai trò',
'Edit Sector': 'Edit Sector',
'Edit Sent Item': 'Edit Sent Item',
'Edit Sent Shipment': 'Edit Sent Shipment',
'Edit Setting': 'Chỉnh sửa cài đặt',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Chỉnh sửa thông tin cư trú',
'Edit Shelter Service': 'Chỉnh sửa dịch vụ cư trú',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Shipment Transit Log': 'Edit Shipment Transit Log',
'Edit Shipment/Way Bills': 'Edit Shipment/Way Bills',
'Edit Shipment<>Item Relation': 'Edit Shipment<>Item Relation',
'Edit Site': 'Edit Site',
'Edit Skill': 'Chỉnh sửa kỹ năng',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Staff': 'Edit Staff',
'Edit Staff Type': 'Edit Staff Type',
'Edit Storage Bin Type(s)': 'Edit Storage Bin Type(s)',
'Edit Storage Bins': 'Edit Storage Bins',
'Edit Storage Location': 'Edit Storage Location',
'Edit Subscription': 'Edit Subscription',
'Edit Survey Answer': 'Chỉnh sửa trả lời khảo sát',
'Edit Survey Question': 'Edit Survey Question',
'Edit Survey Section': 'Edit Survey Section',
'Edit Survey Series': 'Edit Survey Series',
'Edit Survey Template': 'Edit Survey Template',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Track': 'Edit Track',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit Unit': 'Edit Unit',
'Edit User': 'Edit User',
'Edit Volunteer Details': 'Edit Volunteer Details',
'Edit Volunteer Registration': 'Chỉnh sửa đăng ký tình nguyện viên',
'Edit Warehouse': 'Edit Warehouse',
'Edit Warehouse Item': 'Edit Warehouse Item',
'Edit current record': 'Chỉnh sửa bản thu hiện tại',
'Edit message': 'Edit message',
'Edit the Application': 'Chỉnh sửa ứng dụng',
'Editable?': 'Editable?',
'Education': 'Giáo dục',
'Education materials received': 'Đã nhận được tài liệu, dụng cụ phục vụ học tập',
'Education materials, source': 'Dụng cụ học tập, nguồn',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'yêu cầu upload file hoặc URL ảnh',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Elevated': 'Nâng cao lên',
'Email': 'Email',
'Email Settings': 'Email Settings',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': 'Địa chỉ email đã được xác nhận, tuy nhiên đăng ký vẫn còn chờ duyệt - hãy đợi đến khi nhận được phê chuẩn',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Bộ phận cấp cứu',
'Emergency Shelter': 'Emergency Shelter',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Emergency Telecommunications': 'Emergency Telecommunications',
'Enable/Disable Layers': 'Kích hoạt/Tắt Layer',
'Enabled': 'Enabled',
'End date': 'Ngày kết thúc',
'End date should be after start date': 'End date should be after start date',
'End of Period': 'End of Period',
'English': 'English',
'Enter Coordinates in Deg Min Sec': 'Nhập tọa độ ở dạng Độ,Phút,Giây',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a date before': 'Enter a date before',
'Enter a location': 'Enter a location',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Nhập tên cho bảng tính bạn đang tải lên(bắt buộc)',
'Enter a new support request.': 'Nhập một yêu cầu hỗ trợ mới',
'Enter a summary of the request here.': 'Nhập tóm tắt các yêu cầu ở đây',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid email': 'Enter a valid email',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Nhập họ của bạn',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Không bắt buộc phải nhập số điện thoại nhưng nếu nhập, bạn sẽ nhận được tin nhắn từ hệ thống',
'Entry deleted': 'Entry deleted',
'Equipment': 'Equipment',
'Error encountered while applying the theme.': 'Error encountered while applying the theme.',
'Error in message': 'Error in message',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Errors': 'Lỗi',
'Estimated # of households who are affected by the emergency': 'Ước tính # số hộ chịu ảnh hưởng từ thiên tai',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Estimated total number of people in institutions': 'Estimated total number of people in institutions',
'Euros': 'Euro',
'Evacuating': 'Evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Đánh giá thông tin trong thư. (giá trị này KHÔNG NÊN sử dụng trong các ứng dụng cảnh báo công cộng)',
'Event Time': 'Event Time',
'Event Type': 'Loại Sự kiện',
'Event type': 'Event type',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Excreta disposal': 'Excreta disposal',
'Execute a pre-planned activity identified in <instruction>': 'Execute a pre-planned activity identified in <instruction>',
'Existing food stocks, main dishes': 'Existing food stocks, main dishes',
'Existing food stocks, side dishes': 'Existing food stocks, side dishes',
'Expected In': 'Expected In',
'Expected Out': 'Theo dự kiến',
'Expiry Time': 'Expiry Time',
'Explosive Hazard': 'Explosive Hazard',
'Export': 'Export',
'Export Data': 'Export Data',
'Export Database as CSV': 'Export Database as CSV',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in PDF format': 'Export in PDF format',
'Export in RSS format': 'Export in RSS format',
'Export in XLS format': 'Export in XLS format',
'Eye Color': 'Màu mắt',
'Facebook': 'Facebook',
'Facial hair, color': 'Facial hair, color',
'Facial hair, type': 'Facial hair, type',
'Facial hear, length': 'Facial hear, length',
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type': 'Facility Type',
'Factors affecting school attendance': 'Factors affecting school attendance',
'Failed!': 'Failed!',
'Falling Object Hazard': 'Falling Object Hazard',
'Families/HH': 'Families/HH',
'Family': 'Family',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family/friends': 'Gia đình/Bạn bè',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fax': 'Fax',
'Feature Class': 'Feature Class',
'Feature Class Details': 'Feature Class Details',
'Feature Class added': 'Feature Class added',
'Feature Class deleted': 'Feature Class deleted',
'Feature Class updated': 'Feature Class updated',
'Feature Classes': 'Các mức phân loại tính năng',
'Feature Classes are collections of Locations (Features) of the same type': 'Các mức phân loại tính năng là tập hợp các vị trí (tính năng) cùng loại',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Lớp đặc tính đã được thêm',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Cập nhật Layer tính năng',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Type': 'Loại tính năng',
'Features Include': 'Features Include',
'Female': 'Female',
'Female headed households': 'Female headed households',
'Few': 'Một vài',
'Field Hospital': 'Field Hospital',
'File': 'File',
'Fill in Latitude': 'Fill in Latitude',
'Fill in Longitude': 'Fill in Longitude',
'Filter': 'Filter',
'Filter Field': 'Filter Field',
'Filter Value': 'Giá trị lọc',
'Filtered search of aid pledges and requests': 'Filtered search of aid pledges and requests',
'Find': 'Tìm',
'Find Dead Body Report': 'Find Dead Body Report',
'Find Recovery Report': 'Tìm Báo cáo phục hồi',
'Find Volunteers': 'Find Volunteers',
'Find by Name': 'Find by Name',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Dấu vân tay',
'Fingerprints': 'Fingerprints',
'Finish': 'Finish',
'Finished Jobs': 'Finished Jobs',
'Fire': 'Fire',
'Fire suppression and rescue': 'Fire suppression and rescue',
'First Name': 'First Name',
'First name': 'Tên',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Fleet Management': 'Fleet Management',
'Flexible Impact Assessments': 'Flexible Impact Assessments',
'Flood': 'Lũ lụt',
'Flood Alerts': 'Flood Alerts',
'Flood Alerts show water levels in various parts of the country': 'Flood Alerts show water levels in various parts of the country',
'Flood Report': 'Flood Report',
'Flood Report Details': 'Chi tiết báo cáo tình hình lũ lụt',
'Flood Report added': 'Báo cáo lũ lụt đã được thêm',
'Flood Report deleted': 'Flood Report deleted',
'Flood Report updated': 'Flood Report updated',
'Flood Reports': 'Flood Reports',
'Flow Status': 'Flow Status',
'Focal Point': 'Tiêu điểm ',
'Fog': 'Fog',
'Food': 'Food',
'Food Supply': 'Food Supply',
'Food assistance available/expected': 'Food assistance available/expected',
'Footer': 'Footer',
'Footer file %s missing!': 'Footer file %s missing!',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Đối với mỗi đối tác đồng bộ, có một công việc đồng bộ mặc định chạy sau một khoảng thời gian nhất định. Bạn cũng có thể thiết lập thêm công việc đồng bộ hơn nữa để có thể tùy biến theo nhu cầu. Nhấp vào liên kết bên phải để bắt đầu',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners',
'For live help from the Sahana community on using this application, go to': 'For live help from the Sahana community on using this application, go to',
'For messages that support alert network internal functions': 'For messages that support alert network internal functions',
'For more details on the Sahana Eden system, see the': 'For more details on the Sahana Eden system, see the',
'For more information, see ': 'For more information, see ',
'For:': 'For:',
'Forest Fire': 'Forest Fire',
'Formal camp': 'Trại chính thức',
'Format': 'Định dạng',
'Forms': 'Forms',
'Found': 'Found',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'French': 'French',
'Friday': 'Friday',
'From Location': 'From Location',
'From Warehouse': 'From Warehouse',
'Frost': 'Frost',
'Full': 'Full',
'Full beard': 'Full beard',
'Fullscreen Map': 'Fullscreen Map',
'Functional Tests': 'Functional Tests',
'Functions available': 'Functions available',
'Funding Organization': 'Funding Organization',
'Funeral': 'Funeral',
'GIS Reports of Shelter': 'GIS Reports of Shelter',
'GIS integration to view location details of the Shelter': 'GIS integration to view location details of the Shelter',
'GPS': 'GPS',
'GPS Marker': 'Đánh dấu GPS',
'GPS Track': 'GPS Track',
'GPS Track File': 'GPS Track File',
'GPX Track': 'GPX Track',
'Gale Wind': 'Gale Wind',
'Gap Analysis': 'Gap Analysis',
'Gap Analysis Map': 'Gap Analysis Map',
'Gap Analysis Report': 'Gap Analysis Report',
'Gap Map': 'Gap Map',
'Gap Report': 'Gap Report',
'Gateway Settings': 'Gateway Settings',
'Gateway settings updated': 'Gateway settings updated',
'Gender': 'Gender',
'General Medical/Surgical': 'General Medical/Surgical',
'General emergency and public safety': 'General emergency and public safety',
'Generator': 'Bộ sinh',
'Geocoder Selection': 'Geocoder Selection',
'Geometry Name': 'Geometry Name',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo module not available within the running Python - this needs installing for PDF output!',
'Girls 13-18 yrs in affected area': 'Girls 13-18 yrs in affected area',
'Girls 13-18 yrs not attending school': 'Girls 13-18 yrs not attending school',
'Girls 6-12 yrs in affected area': 'Girls 6-12 yrs in affected area',
'Girls 6-12 yrs not attending school': 'Girls 6-12 yrs not attending school',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen the person': 'Give information about where and when you have seen the person',
'Give information about where and when you have seen them': 'Give information about where and when you have seen them',
'Global Messaging Settings': 'Cài đặt hộp thư tin nhắn toàn cầu',
'Goatee': 'Goatee',
'Government': 'Government',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Grade': 'Grade',
'Greek': 'Greek',
'Group': 'Group',
'Group Details': 'Group Details',
'Group ID': 'Group ID',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Memberships': 'Group Memberships',
'Group Title': 'Group Title',
'Group Type': 'Loại nhóm',
'Group added': 'Đã thêm nhóm',
'Group deleted': 'Group deleted',
'Group description': 'Mô tả nhóm',
'Group name': 'Group name',
'Group type': 'Loại nhóm',
'Group updated': 'Group updated',
'Groups': 'Groups',
'Groups removed': 'Groups removed',
'Guest': 'Guest',
'Hail': 'Hail',
'Hair Color': 'Hair Color',
'Hair Length': 'Hair Length',
'Hair Style': 'Kiểu tóc',
'Has data from this Reference Document been entered into Sahana?': 'Has data from this Reference Document been entered into Sahana?',
'Has the safety and security of women and children in your community changed since the emergency?': 'Has the safety and security of women and children in your community changed since the emergency?',
'Has your business been damaged in the course of the disaster?': 'Has your business been damaged in the course of the disaster?',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': 'Have households received any shelter/NFI assistance or is assistance expected in the coming days?',
'Have normal food sources been disrupted?': 'Have normal food sources been disrupted?',
'Have schools received or are expecting to receive any assistance?': 'Have schools received or are expecting to receive any assistance?',
'Have the people received or are you expecting any medical or food assistance in the coming days?': 'Have the people received or are you expecting any medical or food assistance in the coming days?',
'Hazard Pay': 'Hazard Pay',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Header Background': 'Header Background',
'Header background file %s missing!': 'Header background file %s missing!',
'Headquarters': 'Headquarters',
'Health': 'Health',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center': 'Trung tâm y tế',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Health services functioning prior to disaster': 'Health services functioning prior to disaster',
'Health services functioning since disaster': 'Health services functioning since disaster',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height': 'Height',
'Height (cm)': 'Height (cm)',
'Help': 'Help',
'Helps to monitor status of hospitals': 'Hỗ trợ giám sát trạng thái các bệnh viện',
'Helps to report and search for Missing Persons': 'Hỗ trợ báo cáo và tìm kếm những người mất tích',
'Here are the solution items related to the problem.': 'Here are the solution items related to the problem.',
'High': 'High',
'High Water': 'High Water',
'Hindu': 'Hindu',
'History': 'Lịch sử',
'Hit the back button on your browser to try again.': 'Nhấp vào nút Back trên trình duyệt để tải lại',
'Holiday Address': 'Holiday Address',
'Home': 'Trang chủ',
'Home Address': 'Địa chỉ nhà',
'Home Country': 'Quê quán',
'Home Crime': 'Home Crime',
'Hospital': 'Bệnh viện',
'Hospital Details': 'Chi tiết thông tin bệnh viện',
'Hospital Status Report': 'Báo cáo tình trạng bệnh viện',
'Hospital information added': 'Đã thêm thông tin Bệnh viện',
'Hospital information deleted': 'Đã xóa thông tin bệnh viện',
'Hospital information updated': 'Đã cập nhật thông tin bệnh viện',
'Hospital status assessment.': 'Đánh giá trạng thái bệnh viện',
'Hospitals': 'Bệnh viện',
'Hot Spot': 'Điểm nóng',
'Hourly': 'Hourly',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'How did boys 13-17yrs spend most of their time prior to the disaster?': 'How did boys 13-17yrs spend most of their time prior to the disaster?',
'How did boys <12yrs spend most of their time prior to the disaster?': 'How did boys <12yrs spend most of their time prior to the disaster?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'How did boys girls 13-17yrs spend most of their time prior to the disaster?',
'How did girls <12yrs spend most of their time prior to the disaster?': 'How did girls <12yrs spend most of their time prior to the disaster?',
'How do boys 13-17yrs spend most of their time now?': 'How do boys 13-17yrs spend most of their time now?',
'How do boys <12yrs spend most of their time now?': 'How do boys <12yrs spend most of their time now?',
'How do girls 13-17yrs spend most of their time now?': 'How do girls 13-17yrs spend most of their time now?',
'How do girls <12yrs spend most of their time now?': 'How do girls <12yrs spend most of their time now?',
'How does it work?': 'How does it work?',
'How is this person affected by the disaster? (Select all that apply)': 'How is this person affected by the disaster? (Select all that apply)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': 'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.',
'How long does it take you to walk to the health service?': 'How long does it take you to walk to the health service?',
'How long will the food last?': 'How long will the food last?',
'How long will this water resource last?': 'How long will this water resource last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Có bao nhiêu bé trai (0 đến 17 tuổi) bị mất tích do thiên tai',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'Bao nhiêu người (trên 18 tuổi) chết trong thảm họa',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'Số nạn nhân là nữ trên 18 tuổi chịu ảnh hưởng của cuộc khủng hoảng',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many doctors in the health centers are still actively working?': 'How many doctors in the health centers are still actively working?',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': 'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?',
'How many latrines are available in the village/IDP centre/Camp?': 'How many latrines are available in the village/IDP centre/Camp?',
'How many midwives in the health centers are still actively working?': 'How many midwives in the health centers are still actively working?',
'How many new cases have been admitted to this facility in the past 24h?': 'How many new cases have been admitted to this facility in the past 24h?',
'How many nurses in the health centers are still actively working?': 'How many nurses in the health centers are still actively working?',
'How many of the patients with the disease died in the past 24h at this facility?': 'How many of the patients with the disease died in the past 24h at this facility?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'How many of the primary school age boys (6-12) in the area are not attending school?',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'How many of the primary school age girls (6-12) in the area are not attending school?',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': 'How many of the primary/secondary schools are now open and running a regular schedule of class?',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'How many of the secondary school age boys (13-18) in the area are not attending school?',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'How many of the secondary school age girls (13-18) in the area are not attending school?',
'How many patients with the disease are currently hospitalized at this facility?': 'How many patients with the disease are currently hospitalized at this facility?',
'How many primary school age boys (6-12) are in the affected area?': 'How many primary school age boys (6-12) are in the affected area?',
'How many primary school age girls (6-12) are in the affected area?': 'How many primary school age girls (6-12) are in the affected area?',
'How many primary/secondary schools were opening prior to the disaster?': 'How many primary/secondary schools were opening prior to the disaster?',
'How many secondary school age boys (13-18) are in the affected area?': 'How many secondary school age boys (13-18) are in the affected area?',
'How many secondary school age girls (13-18) are in the affected area?': 'How many secondary school age girls (13-18) are in the affected area?',
'How many teachers have been affected by the disaster (affected = unable to work)?': 'How many teachers have been affected by the disaster (affected = unable to work)?',
'How many teachers worked in the schools prior to the disaster?': 'How many teachers worked in the schools prior to the disaster?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane': 'Hurricane',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hygiene': 'Hygiene',
'Hygiene NFIs': 'Hygiene NFIs',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Dụng cụ vệ sinh, nguồn',
'Hygiene practice': 'Hygiene practice',
'Hygiene problems': 'Hygiene problems',
'ID Label': 'ID Label',
'ID Tag': 'ID Tag',
'ID Tag Number': 'ID Tag Number',
'ID type': 'ID type',
'Ice Pressure': 'Áp suất băng',
'Iceberg': 'Iceberg',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'Ideally a full URL to the source file, otherwise just a note on where data came from.',
'Identification': 'Identification',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identification label of the Storage bin.': 'Nhãn xác định Bin lưu trữ',
'Identified as': 'Identified as',
'Identified by': 'Identified by',
'Identity': 'Identity',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.',
'If no marker defined then the system default marker is used': 'If no marker defined then the system default marker is used',
'If no, specify why': 'If no, specify why',
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
'If this record should be restricted then select which role is required to access the record here.': 'If this record should be restricted then select which role is required to access the record here.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'If this record should be restricted then select which role(s) are permitted to access the record here.',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": "If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.",
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'nếu có thì cái nào và như thế nào',
"If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:",
"If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:",
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": "If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:",
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'If you know what the Geonames ID of this location is then you can enter it here.',
'If you know what the OSM ID of this location is then you can enter it here.': 'If you know what the OSM ID of this location is then you can enter it here.',
'If you need to add a new document then you can click here to attach one.': 'Nếu cần thêm một tài liệu mới, nhấn vào đây để đính kèm',
'If you would like to help, then please': 'If you would like to help, then please',
'Illegal Immigrant': 'Illegal Immigrant',
'Image': 'Image',
'Image Details': 'Hình ảnh chi tiết',
'Image Tags': 'Image Tags',
'Image Type': 'Image Type',
'Image Upload': 'Image Upload',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Image/Attachment': 'Image/Attachment',
'Image/Other Attachment': 'Image/Other Attachment',
'Imagery': 'Imagery',
'Images': 'Images',
'Immediate reconstruction assistance, Rank': 'Immediate reconstruction assistance, Rank',
'Impact Assessments': 'Impact Assessments',
'Impact Details': 'Impact Details',
'Impact Type': 'Impact Type',
'Impact Type Details': 'Impact Type Details',
'Impact Type added': 'Impact Type added',
'Impact Type deleted': 'Impact Type deleted',
'Impact Type updated': 'Impact Type updated',
'Impact Types': 'Impact Types',
'Impact added': 'Impact added',
'Impact deleted': 'Impact deleted',
'Impact updated': 'Impact updated',
'Impacts': 'Impacts',
'Import': 'Import',
'Import & Export Data': 'Import & Export Data',
'Import Data': 'Import Data',
'Import Job': 'Import Job',
'Import Jobs': 'Chuyển đổi nghề nghiệp',
'Import and Export': 'Import and Export',
'Import from Ushahidi Instance': 'Import from Ushahidi Instance',
'Import if Master': 'Import if Master',
'Import job created': 'Import job created',
'Import multiple tables as CSV': 'Chuyển đổi định dạng bảng sang CSV',
'Import/Export': 'Import/Export',
'Important': 'Quan trọng',
'Importantly where there are no aid services being provided': 'Importantly where there are no aid services being provided',
'Imported': 'Imported',
'Importing data from spreadsheets': 'Importing data from spreadsheets',
'Improper decontamination': 'Improper decontamination',
'Improper handling of dead bodies': 'Improper handling of dead bodies',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Trong GeoServer, đây là tên lớp. Trong WFS getCapabilities, đây là tên FeatureType, phần sau dấu hai chấm (:).',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'In Inventories': 'In Inventories',
'In Process': 'In Process',
'In Progress': 'In Progress',
'In Transit': 'In Transit',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': 'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Incident': 'Incident',
'Incident Categories': 'Incident Categories',
'Incident Details': 'Incident Details',
'Incident Report': 'Incident Report',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report updated': 'Incident Report updated',
'Incident Reporting': 'Incident Reporting',
'Incident Reporting System': 'Incident Reporting System',
'Incident Reports': 'Incident Reports',
'Incident added': 'Incident added',
'Incident deleted': 'Incident deleted',
'Incident updated': 'Incident updated',
'Incidents': 'Incidents',
'Incomplete': 'Incomplete',
'Individuals': 'Individuals',
'Industrial Crime': 'Industrial Crime',
'Industry Fire': 'Industry Fire',
'Industry close to village/camp': 'Industry close to village/camp',
'Infant (0-1)': 'Trẻ sơ sinh',
'Infectious Disease': 'Infectious Disease',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Information gaps': 'Information gaps',
'Infusion catheters available': 'Infusion catheters available',
'Infusion catheters need per 24h': 'Infusion catheters need per 24h',
'Infusion catheters needed per 24h': 'Infusion catheters needed per 24h',
'Infusions available': 'Infusions available',
'Infusions needed per 24h': 'Infusions needed per 24h',
'Input Job': 'Input Job',
'Instant Porridge': 'Instant Porridge',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.",
'Institution': 'Institution',
'Insufficient': 'Insufficient',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Intake Items': 'Intake Items',
'Intergovernmental Organisation': 'Intergovernmental Organisation',
'Internal State': 'Internal State',
'International NGO': 'Tổ chức phi chính phủ quốc tế',
'International Organization': 'International Organization',
'International Staff': 'International Staff',
'Intervention': 'Intervention',
'Interview taking place at': 'Interview taking place at',
'Invalid': 'Invalid',
'Invalid Query': 'Truy vấn không hợp lệ',
'Invalid email': 'Invalid email',
'Invalid request!': 'Yêu cầu không hợp lệ',
'Invalid ticket': 'Ticket không hợp lệ',
'Inventories with Item': 'Inventories with Item',
'Inventory': 'Inventory',
'Inventory Item Details': 'Chi tiết hàng hóa trong kho',
'Inventory Item added': 'Bổ sung hàng hóa vào kho lưu trữ.',
'Inventory Item deleted': 'Inventory Item deleted',
'Inventory Item updated': 'Inventory Item updated',
'Inventory Items': 'Inventory Items',
'Inventory Management': 'Inventory Management',
'Inventory Store': 'Inventory Store',
'Inventory Store Details': 'Chi tiết kho lưu trữ',
'Inventory Store added': 'Inventory Store added',
'Inventory Store deleted': 'Inventory Store deleted',
'Inventory Store updated': 'Inventory Store updated',
'Inventory Stores': 'Inventory Stores',
'Inventory of Effects': 'Kho dự phòng',
'Inventory/Ledger': 'Inventory/Ledger',
'Is adequate food and water available for these institutions?': 'Is adequate food and water available for these institutions?',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Is there any industrial or agro-chemical production close to the affected area/village?': 'Is there any industrial or agro-chemical production close to the affected area/village?',
'Issuing Authority': 'Issuing Authority',
'It is built using the Template agreed by a group of NGOs working together as the': 'It is built using the Template agreed by a group of NGOs working together as the',
'Item': 'Item',
'Item Catalog Categories': 'Item Catalog Categories',
'Item Catalog Category': 'Item Catalog Category',
'Item Catalog Category Details': 'Item Catalog Category Details',
'Item Catalog Category added': 'Item Catalog Category added',
'Item Catalog Category deleted': 'Item Catalog Category deleted',
'Item Catalog Category updated': 'Item Catalog Category updated',
'Item Catalog Details': 'Item Catalog Details',
'Item Catalog added': 'Item Catalog added',
'Item Catalog deleted': 'Đã xóa danh mục hàng hóa',
'Item Catalog updated': 'Item Catalog updated',
'Item Catalogs': 'Item Catalogs',
'Item Categories': 'Item Categories',
'Item Category': 'Item Category',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Đã xóa Tiêu chí hàng hóa',
'Item Category updated': 'Item Category updated',
'Item Details': 'Item Details',
'Item Packet Details': 'Item Packet Details',
'Item Packet added': 'Item Packet added',
'Item Packet deleted': 'Item Packet deleted',
'Item Packet updated': 'Item Packet updated',
'Item Packets': 'Item Packets',
'Item Sub-Categories': 'Item Sub-Categories',
'Item Sub-Category': 'Item Sub-Category',
'Item Sub-Category Details': 'Item Sub-Category Details',
'Item Sub-Category added': 'Item Sub-Category added',
'Item Sub-Category deleted': 'Item Sub-Category deleted',
'Item Sub-Category updated': 'Đã cập nhật tiêu chí phụ của hàng hóa',
'Item added': 'Item added',
'Item already in Bundle!': 'Hàng đã có trong Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item updated': 'Item updated',
'Items': 'Hàng hóa',
'Japanese': 'Japanese',
'Jerry can': 'Jerry can',
'Jew': 'Jew',
'Job Title': 'Job Title',
'Jobs': 'Jobs',
'Just Once': 'Just Once',
'KPIs': 'KPIs',
'Key': 'Key',
'Key Details': 'Key Details',
'Key added': 'Key added',
'Key deleted': 'Key deleted',
'Key updated': 'Key updated',
'Keys': 'Keys',
'Kit': 'Kit',
'Kit Contents': 'Kit Contents',
'Kit Details': 'Chi tiết Kit',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit deleted': 'Đã xóa Kit',
'Kit updated': 'Kit updated',
'Kits': 'Kits',
'Known Identities': 'Known Identities',
'Known incidents of violence against women/girls': 'Known incidents of violence against women/girls',
'Known incidents of violence since disaster': 'Known incidents of violence since disaster',
'LICENCE': 'bản quyền',
'LICENSE': 'LICENSE',
'LMS Administration': 'Quản trị LMS',
'Label': 'Nhãn',
'Lack of material': 'Lack of material',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language': 'Language',
'Last Name': 'Last Name',
'Last known location': 'Last known location',
'Last name': 'Last name',
'Last synchronization time': 'Last synchronization time',
'Last updated by': 'Last updated by',
'Last updated on': 'Last updated on',
'Latitude': 'Latitude',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude should be between': 'Latitude should be between',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer': 'Layer',
'Layer Details': 'Layer Details',
'Layer added': 'Layer added',
'Layer deleted': 'Đã xóa layer',
'Layer updated': 'Đã cập nhật Layer',
'Layers': 'Layers',
'Layers updated': 'Đã cập nhật Layer',
'Layout': 'Layout',
'Legend Format': 'Legend Format',
'Length': 'Độ dài',
'Level': 'Level',
"Level is higher than parent's": "Level is higher than parent's",
'Library support not available for OpenID': 'Library support not available for OpenID',
'Line': 'Line',
'Link Item & Shipment': 'Link Item & Shipment',
'Link an Item & Shipment': 'Link an Item & Shipment',
'Linked Records': 'Linked Records',
'Linked records': 'Linked records',
'List': 'List',
'List / Add Baseline Types': 'List / Add Baseline Types',
'List / Add Impact Types': 'List / Add Impact Types',
'List / Add Services': 'List / Add Services',
'List / Add Types': 'List / Add Types',
'List Activities': 'List Activities',
'List Aid Requests': 'Danh sách Yêu cầu cứu trợ',
'List All': 'List All',
'List All Entries': 'List All Entries',
'List All Memberships': 'Danh sách tất cả các thành viên',
'List Assessment Summaries': 'List Assessment Summaries',
'List Assessments': 'Danh sách Trị giá tính thuế',
'List Baseline Types': 'List Baseline Types',
'List Baselines': 'List Baselines',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Catalog Items': 'List Catalog Items',
'List Category<>Sub-Category<>Catalog Relation': 'List Category<>Sub-Category<>Catalog Relation',
'List Checklists': 'Danh sách Checklists ',
'List Cluster Subsectors': 'List Cluster Subsectors',
'List Clusters': 'List Clusters',
'List Configs': 'List Configs',
'List Conflicts': 'List Conflicts',
'List Contacts': 'List Contacts',
'List Distribution Items': 'List Distribution Items',
'List Distributions': 'Danh sách ủng hộ,quyên góp',
'List Documents': 'List Documents',
'List Donors': 'List Donors',
'List Feature Classes': 'List Feature Classes',
'List Feature Layers': 'List Feature Layers',
'List Flood Reports': 'List Flood Reports',
'List Groups': 'Danh sách Nhóm',
'List Groups/View Members': 'List Groups/View Members',
'List Hospitals': 'Danh sách Bệnh viện',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Impact Assessments': 'List Impact Assessments',
'List Impact Types': 'List Impact Types',
'List Impacts': 'List Impacts',
'List Incident Reports': 'List Incident Reports',
'List Incidents': 'List Incidents',
'List Inventory Items': 'List Inventory Items',
'List Inventory Stores': 'List Inventory Stores',
'List Item Catalog Categories': 'List Item Catalog Categories',
'List Item Catalogs': 'List Item Catalogs',
'List Item Categories': 'List Item Categories',
'List Item Packets': 'List Item Packets',
'List Item Sub-Categories': 'List Item Sub-Categories',
'List Items': 'List Items',
'List Keys': 'List Keys',
'List Kits': 'Danh sách Kit',
'List Layers': 'List Layers',
'List Locations': 'Danh sách Vị trí',
'List Log Entries': 'List Log Entries',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Memberships': 'Danh sách thành viên',
'List Messages': 'Danh sách tin nhắn ',
'List Metadata': 'Danh sách dữ liệu',
'List Missing Persons': 'Danh sách những người mất tích',
'List Need Types': 'List Need Types',
'List Needs': 'List Needs',
'List Offices': 'List Offices',
'List Organizations': 'List Organizations',
'List Peers': 'List Peers',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List Positions': 'List Positions',
'List Problems': 'List Problems',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Rapid Assessments': 'List Rapid Assessments',
'List Received Items': 'List Received Items',
'List Received Shipments': 'List Received Shipments',
'List Records': 'List Records',
'List Registrations': 'List Registrations',
'List Reports': 'List Reports',
'List Request Items': 'Danh sách Hang hóa yêu cầu',
'List Requests': 'Danh sách yêu cầu',
'List Resources': 'Danh sách tài nguyên',
'List Responses': 'List Responses',
'List Rivers': 'Danh sách sông',
'List Roles': 'Danh sách Vai trò',
'List Sections': 'List Sections',
'List Sector': 'List Sector',
'List Sent Items': 'List Sent Items',
'List Sent Shipments': 'List Sent Shipments',
'List Service Profiles': 'List Service Profiles',
'List Settings': 'List Settings',
'List Shelter Services': 'List Shelter Services',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Shipment Transit Logs': 'List Shipment Transit Logs',
'List Shipment/Way Bills': 'Danh sách Đơn hàng/Phí đường bộ',
'List Shipment<>Item Relation': 'List Shipment<>Item Relation',
'List Sites': 'List Sites',
'List Skill Types': 'List Skill Types',
'List Skills': 'Danh sách kỹ năng',
'List Solutions': 'List Solutions',
'List Staff': 'Danh sách Nhân viên',
'List Staff Types': 'List Staff Types',
'List Status': 'List Status',
'List Storage Bin Type(s)': 'List Storage Bin Type(s)',
'List Storage Bins': 'List Storage Bins',
'List Storage Location': 'Danh sách vị trí kho lưu trữ',
'List Subscriptions': 'Danh sách Đăng ký',
'List Survey Answers': 'List Survey Answers',
'List Survey Questions': 'Danh sách câu hỏi khảo sát',
'List Survey Sections': 'List Survey Sections',
'List Survey Series': 'List Survey Series',
'List Survey Templates': 'List Survey Templates',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Themes': 'List Themes',
'List Tickets': 'Danh sách Ticket',
'List Tracks': 'List Tracks',
'List Units': 'Danh sách đơn vị',
'List Users': 'Danh sách người dùng',
'List Volunteers': 'List Volunteers',
'List Warehouse Items': 'List Warehouse Items',
'List Warehouses': 'List Warehouses',
'List all': 'Hiển thị tất cả',
'List of Items': 'List of Items',
'List of Missing Persons': 'Danh sách những người mất tích',
'List of Peers': 'List of Peers',
'List of Reports': 'List of Reports',
'List of Requests': 'Danh sách yêu cầu',
'List of Spreadsheets': 'List of Spreadsheets',
'List of Spreadsheets uploaded': 'List of Spreadsheets uploaded',
'List of Volunteers for this skills set': 'List of Volunteers for this skills set',
'List of addresses': 'Danh sách các địa chỉ',
'List unidentified': 'List unidentified',
'List/Add': 'Danh sách/Thêm',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Danh sách "Ai làm gì, ở đâu"Cho phép các tổ chức cứu trợ điều phối hoạt động của mình',
'Live Help': 'Trợ giúp',
'Livelihood': 'Livelihood',
'Load Cleaned Data into Database': 'Load Cleaned Data into Database',
'Load Details': 'Load Details',
'Load Raw File into Grid': 'Load Raw File into Grid',
'Load the details to help decide which is the best one to keep out of the 2.': 'Load the details to help decide which is the best one to keep out of the 2.',
'Loading Locations...': 'Loading Locations...',
'Local Name': 'Tên địa phương',
'Local Names': 'Local Names',
'Location': 'Location',
'Location 1': 'Location 1',
'Location 2': 'Location 2',
'Location De-duplicated': 'Location De-duplicated',
'Location Details': 'Location Details',
'Location added': 'Location added',
'Location deleted': 'Đã xóa vị trí',
'Location details': 'Location details',
'Location updated': 'Location updated',
'Location: ': 'Location: ',
'Locations': 'Locations',
'Locations De-duplicator': 'Locations De-duplicator',
'Locations of this level need to have a parent of level': 'Locations of this level need to have a parent of level',
'Locations should be different!': 'Locations should be different!',
'Lockdown': 'Lockdown',
'Log': 'Log',
'Log Entry Details': 'Log Entry Details',
'Log entry added': 'Log entry added',
'Log entry deleted': 'Xóa theo dõi đăng nhập',
'Log entry updated': 'Cập nhật theo dõi đăng nhập',
'Login': 'Đăng nhập',
'Logistics': 'Logistics',
'Logistics Management': 'Logistics Management',
'Logistics Management System': 'Logistics Management System',
'Logo': 'Logo',
'Logo file %s missing!': 'Logo file %s missing!',
'Logout': 'Logout',
'Long Text': 'Long Text',
'Longitude': 'Longitude',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Kinh độ trải dài theo hướng Đông-Tây. Kinh tuyến không nằm trên kinh tuyến gốc (Greenwich Mean Time) hướng về phía đông, vắt ngang châu Âu và châu Á.',
'Longitude should be between': 'Longitude should be between',
'Looting': 'Looting',
'Lost Password': 'Lost Password',
'Low': 'Low',
'Magnetic Storm': 'Magnetic Storm',
'Main cash source': 'Main cash source',
'Main income sources before disaster': 'Main income sources before disaster',
'Major outward damage': 'Major outward damage',
'Make Pledge': 'Make Pledge',
'Make Request': 'Make Request',
'Make a Request': 'Tạo yêu cầu',
'Make a Request for Aid': 'Tạo yêu cầu cứu trợ',
'Make preparations per the <instruction>': 'Make preparations per the <instruction>',
'Male': 'Male',
'Malnutrition present prior to disaster': 'Malnutrition present prior to disaster',
'Manage': 'Manage',
'Manage Category': 'Manage Category',
'Manage Item catalog': 'Manage Item catalog',
'Manage Kits': 'Manage Kits',
'Manage Relief Item Catalogue': 'Manage Relief Item Catalogue',
'Manage Sub-Category': 'Quản lý Tiêu chí phụ',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Warehouses/Sites': 'Manage Warehouses/Sites',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Manage volunteers by capturing their skills, availability and allocation': 'Nắm bắt kỹ năng, khả năng và khu vực hoạt động của tình nguyện viên để quản lý',
'Manager': 'Manager',
'Managing Office': 'Managing Office',
'Managing, Storing and Distributing Catalog Items.': 'Managing, Storing and Distributing Catalog Items.',
'Managing, Storing and Distributing Relief Items': 'Quản lý, Lưu trữ và Quyên góp hàng cứu trợ',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'Mandatory. The URL to access the service.': 'Mandatory. The URL to access the service.',
'Manual': 'Manual',
'Manual Synchronization': 'Manual Synchronization',
'Many': 'Many',
'Map': 'Map',
'Map Height': 'Chiều cao bản đồ',
'Map Service Catalogue': 'Catalogue bản đồ dịch vụ',
'Map Settings': 'Cài đặt bản đồ',
'Map Viewing Client': 'Map Viewing Client',
'Map Width': 'Độ rộng bản đồ',
'Map of Hospitals': 'Bản đồ bệnh viện',
'Mapping': 'Mapping',
'Marine Security': 'Marine Security',
'Marital Status': 'Tình trạng hôn nhân',
'Marker': 'Marker',
'Marker Details': 'Chi tiết Đèn hiệu',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Markers': 'Markers',
'Master Message Log': 'Master Message Log',
'Master Message Log to process incoming reports & requests': 'Kiểm soát log tin nhắn để xử lý báo cáo và yêu cầu gửi đến',
'Match Percentage': 'Match Percentage',
'Match percentage indicates the % match between these two records': 'Match percentage indicates the % match between these two records',
'Matching Records': 'Matching Records',
'Matrix of Choices (Multiple Answers)': 'Matrix of Choices (Multiple Answers)',
'Matrix of Choices (Only one answer)': 'Matrix of Choices (Only one answer)',
'Matrix of Text Fields': 'Matrix of Text Fields',
'Max Persons per Dwelling': 'Max Persons per Dwelling',
'Maximum Weight': 'Maximum Weight',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.',
'Medical and public health': 'Medical and public health',
'Medicine': 'Medicine',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Members': 'Members',
'Membership': 'Membership',
'Membership Details': 'Membership Details',
'Membership added': 'Đã thêm thành viên',
'Membership deleted': 'Membership deleted',
'Membership updated': 'Cập nhật thông tin thành viên',
'Memberships': 'Memberships',
'Message': 'Message',
'Message Details': 'Message Details',
'Message Variable': 'Message Variable',
'Message added': 'Đã thêm tin nhắn',
'Message deleted': 'Message deleted',
'Message sent to outbox': 'Message sent to outbox',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Messages': 'Messages',
'Messaging': 'Messaging',
'Messaging settings updated': 'Messaging settings updated',
'Metadata': 'Metadata',
'Metadata Details': 'Metadata Details',
'Metadata added': 'Đã thêm dữ liệu',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'Metadata can be supplied here to be applied to all uploaded photos, if desired.',
'Metadata deleted': 'Metadata deleted',
'Metadata updated': 'Metadata updated',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method used': 'Method used',
'Micronutrient malnutrition prior to disaster': 'Micronutrient malnutrition prior to disaster',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Dân di cư hoặc dân tộc thiểu số',
'Military': 'Military',
'Minorities participating in coping activities': 'Minorities participating in coping activities',
'Minutes must be a number between 0 and 60': 'Minutes must be a number between 0 and 60',
'Minutes per Month': 'Minutes per Month',
'Minutes should be a number greater than 0 and less than 60': 'Minutes should be a number greater than 0 and less than 60',
'Miscellaneous': 'Miscellaneous',
'Missing': 'Missing',
'Missing Person': 'Người mất tích',
'Missing Person Details': 'Chi tiết về người mất tích',
'Missing Person Reports': 'Báo cáo số người mất tích',
'Missing Persons': 'Người mất tích',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons Report': 'Báo cáo số người mất tích',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Mobile': 'Mobile',
'Mobile Assess': 'Mobile Assess',
'Mobile Assess.': 'Mobile Assess.',
'Mobile Basic Assessment': 'Mobile Basic Assessment',
'Mobile Phone': 'Mobile Phone',
'Mode': 'Mode',
'Modem Settings': 'Modem Settings',
'Modem settings updated': 'Modem settings updated',
'Moderator': 'Moderator',
'Modify Information on groups and individuals': 'Modify Information on groups and individuals',
'Modifying data in spreadsheet before importing it to the database': 'Modifying data in spreadsheet before importing it to the database',
'Module Administration': 'Quản trị Mô-đun',
'Module disabled!': 'Module disabled!',
'Module provides access to information on current Flood Levels.': 'Module provides access to information on current Flood Levels.',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.',
'Monday': 'Thứ Hai',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Months': 'Months',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'Movements (Filter In/Out/Lost)': 'Movements (Filter In/Out/Lost)',
'MultiPolygon': 'MultiPolygon',
'Multiple': 'Multiple',
'Multiple Choice (Multiple Answers)': 'Multiple Choice (Multiple Answers)',
'Multiple Choice (Only One Answer)': 'Multiple Choice (Only One Answer)',
'Multiple Text Fields': 'Multiple Text Fields',
'Multiplicator': 'Multiplicator',
'Muslim': 'Muslim',
'My Tasks': 'My Tasks',
'N/A': 'Không xác định',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.",
'Name': 'Tên',
'Name and/or ID': 'Name and/or ID',
'Name and/or ID Label': 'Name and/or ID Label',
'Name of Storage Bin Type.': 'Tên loại Bin lưu trữ',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name of the file (& optional sub-path) located in views which should be used for footer.',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.',
'Names can be added in multiple languages': 'Names can be added in multiple languages',
'National ID Card': 'Chứng minh thư',
'National NGO': 'Các tổ chức phi chính phủ ',
'National Staff': 'National Staff',
'Nationality': 'Nationality',
'Nationality of the person.': 'Nationality of the person.',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
'Need Type': 'Need Type',
'Need Type Details': 'Need Type Details',
'Need Type added': 'Need Type added',
'Need Type deleted': 'Need Type deleted',
'Need Type updated': 'Need Type updated',
'Need Types': 'Need Types',
"Need a 'url' argument!": "Need a 'url' argument!",
'Need added': 'Need added',
'Need deleted': 'Need deleted',
'Need to configure Twitter Authentication': 'Need to configure Twitter Authentication',
'Need to select 2 Locations': 'Need to select 2 Locations',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a Resource!': 'Need to specify a Resource!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a group!': 'Need to specify a group!',
'Need to specify a location to search for.': 'Cần chọn địa điểm tìm kiếm',
'Need to specify a role!': 'Yêu cầu xác định vai trò',
'Need to specify a table!': 'Need to specify a table!',
'Need to specify a user!': 'Need to specify a user!',
'Need updated': 'Need updated',
'Needs': 'Needs',
'Needs Details': 'Needs Details',
'Needs to reduce vulnerability to violence': 'Needs to reduce vulnerability to violence',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neighbourhood': 'Neighbourhood',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network': 'Network',
'Neurology': 'Neurology',
'New': 'New',
'New Assessment reported from': 'New Assessment reported from',
'New Checklist': 'Checklist mới',
'New Peer': 'New Peer',
'New Record': 'New Record',
'New Report': 'New Report',
'New Request': 'Yêu cầu mới',
'New Solution Choice': 'New Solution Choice',
'New Synchronization Peer': 'New Synchronization Peer',
'New cases in the past 24h': 'New cases in the past 24h',
'Next': 'Next',
'No': 'No',
'No Activities Found': 'No Activities Found',
'No Addresses currently registered': 'Hiện tại chưa đăng ký Địa chỉ',
'No Aid Requests have been made yet': 'Chưa có yêu cầu cứu trợ nào được tạo',
'No Assessment Summaries currently registered': 'No Assessment Summaries currently registered',
'No Assessments currently registered': 'Hiện tại chưa có đánh giá nào được đăng ký',
'No Baseline Types currently registered': 'No Baseline Types currently registered',
'No Baselines currently registered': 'No Baselines currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Catalog Items currently registered': 'No Catalog Items currently registered',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Hiện tại chưa có Category<>Sub-Category<>Catalog Relation được đăng ký',
'No Checklist available': 'No Checklist available',
'No Cluster Subsectors currently registered': 'No Cluster Subsectors currently registered',
'No Clusters currently registered': 'No Clusters currently registered',
'No Configs currently defined': 'No Configs currently defined',
'No Details currently registered': 'No Details currently registered',
'No Distribution Items currently registered': 'Chưa đăng ký danh sách hàng hóa đóng góp',
'No Distributions currently registered': 'No Distributions currently registered',
'No Documents found': 'No Documents found',
'No Donors currently registered': 'No Donors currently registered',
'No Feature Classes currently defined': 'No Feature Classes currently defined',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Flood Reports currently registered': 'Chưa đăng ký báo cáo lũ lụt',
'No Groups currently defined': 'Hiện tại không xác định được nhóm',
'No Groups currently registered': 'No Groups currently registered',
'No Hospitals currently registered': 'Chưa có bệnh viện nào đăng ký',
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Image': 'Không có ảnh',
'No Images currently registered': 'Hiện tại không có ảnh nào được đăng ký',
'No Impact Types currently registered': 'No Impact Types currently registered',
'No Impacts currently registered': 'No Impacts currently registered',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incidents currently registered': 'Chưa sự việc nào được đưa lên',
'No Inventory Items currently registered': 'No Inventory Items currently registered',
'No Inventory Stores currently registered': 'No Inventory Stores currently registered',
'No Item Catalog Category currently registered': 'No Item Catalog Category currently registered',
'No Item Catalog currently registered': 'No Item Catalog currently registered',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Packets currently registered': 'No Item Packets currently registered',
'No Item Sub-Category currently registered': 'No Item Sub-Category currently registered',
'No Item currently registered': 'No Item currently registered',
'No Items currently registered': 'No Items currently registered',
'No Items currently requested': 'Hiện tại không có hàng hóa nào được yêu cầu',
'No Keys currently defined': 'No Keys currently defined',
'No Kits currently registered': 'No Kits currently registered',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Markers currently available': 'Chưa đăng ký marker ',
'No Members currently registered': 'Chưa đăng ký thành viên',
'No Memberships currently defined': 'Chưa xác nhận đăng ký thành viên',
'No Memberships currently registered': 'Chưa có thông tin đăng ký thành viên',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Metadata currently defined': 'No Metadata currently defined',
'No Need Types currently registered': 'No Need Types currently registered',
'No Needs currently registered': 'No Needs currently registered',
'No Offices currently registered': 'No Offices currently registered',
'No Offices found!': 'No Offices found!',
'No Organizations currently registered': 'No Organizations currently registered',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'Không tìm thấy ảnh nào',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'Hiện tại chưa xác định phép chiếu nào',
'No Projects currently registered': 'Chưa đăng ký dự án',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Received Items currently registered': 'No Received Items currently registered',
'No Received Shipments': 'No Received Shipments',
'No Records currently available': 'No Records currently available',
'No Records matching the query': 'No Records matching the query',
'No Request Items currently registered': 'No Request Items currently registered',
'No Request Shipments': 'No Request Shipments',
'No Requests have been made yet': 'No Requests have been made yet',
'No Requests match this criteria': 'No Requests match this criteria',
'No Responses currently registered': 'No Responses currently registered',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sent Items currently registered': 'No Sent Items currently registered',
'No Sent Shipments': 'No Sent Shipments',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'Hiện tại chưa đăng ký nơi cư trú',
'No Shipment Transit Logs currently registered': 'No Shipment Transit Logs currently registered',
'No Shipment/Way Bills currently registered': 'No Shipment/Way Bills currently registered',
'No Shipment<>Item Relation currently registered': 'No Shipment<>Item Relation currently registered',
'No Sites currently registered': 'No Sites currently registered',
'No Skill Types currently set': 'Chưa cài đặt loại kỹ năng',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Staff currently registered': 'No Staff currently registered',
'No Storage Bin Type currently registered': 'Chưa đăng ký Loại Bin lưu trữ',
'No Storage Bins currently registered': 'No Storage Bins currently registered',
'No Storage Locations currently registered': 'No Storage Locations currently registered',
'No Subscription available': 'No Subscription available',
'No Survey Answers currently registered': 'No Survey Answers currently registered',
'No Survey Questions currently registered': 'No Survey Questions currently registered',
'No Survey Sections currently registered': 'No Survey Sections currently registered',
'No Survey Series currently registered': 'No Survey Series currently registered',
'No Survey Template currently registered': 'No Survey Template currently registered',
'No Tasks with Location Data': 'No Tasks with Location Data',
'No Tasks with Location Data!': 'No Tasks with Location Data!',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'Hiện tại chưa đăng ký Ticket ',
'No Tracks currently available': 'No Tracks currently available',
'No Units currently registered': 'Chưa đăng ký tên đơn vị',
'No Users currently registered': 'Chưa đăng ký người dùng',
'No Volunteers currently registered': 'No Volunteers currently registered',
'No Warehouse Items currently registered': 'No Warehouse Items currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No Warehouses match this criteria': 'No Warehouses match this criteria',
'No access at all': 'Không truy cập',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No conflicts logged': 'No conflicts logged',
'No contact information available': 'No contact information available',
'No contacts currently registered': 'Chưa đăng ký thông tin liên lạc',
'No data in this table - cannot create PDF!': 'Không có dữ liệu trong bảng - không thể tạo file PDF',
'No databases in this application': 'No databases in this application',
'No entries found': 'No entries found',
'No entries matching the query': 'No entries matching the query',
'No import jobs': 'No import jobs',
'No linked records': 'Không có bản thu liên quan',
'No location found': 'No location found',
'No location known for this person': 'No location known for this person',
'No location known for this team': 'No location known for this team',
'No locations registered at this level': 'No locations registered at this level',
'No log entries matching the query': 'No log entries matching the query',
'No matching records found.': 'No matching records found.',
'No messages in the system': 'No messages in the system',
'No peers currently registered': 'No peers currently registered',
'No pending registrations found': 'Không tìm thấy đăng ký đang chờ',
'No pending registrations matching the query': 'No pending registrations matching the query',
'No person record found for current user.': 'No person record found for current user.',
'No positions currently registered': 'No positions currently registered',
'No problem group defined yet': 'No problem group defined yet',
'No records matching the query': 'No records matching the query',
'No records to delete': 'Không có bản thu để xóa',
'No recovery reports available': 'No recovery reports available',
'No report available.': 'Không có báo cáo',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No requests found': 'Không tìm thấy yêu cầu',
'No resources currently registered': 'No resources currently registered',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No status information available': 'No status information available',
'No synchronization': 'Chưa đồng bộ hóa',
'No tasks currently registered': 'No tasks currently registered',
'No template found!': 'Không tìm thấy mẫu',
'No units currently registered': 'No units currently registered',
'No volunteer information registered': 'Chưa đăng ký thông tin tình nguyện viên',
'None': 'None',
'None (no such record)': 'None (no such record)',
'Noodles': 'Mì',
'Normal': 'Normal',
'Normal food sources disrupted': 'Normal food sources disrupted',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Chưa đăng nhập',
'Not Possible': 'Not Possible',
'Not Set': 'Not Set',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Chưa cài đặt hoặc tùy chỉnh chưa đúng',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.',
'Notice to Airmen': 'Lưu ý đối với các phi công',
'Number': 'Số',
'Number of Columns': 'Number of Columns',
'Number of Patients': 'Number of Patients',
'Number of Rows': 'Số hàng',
'Number of Vehicles': 'Number of Vehicles',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Số địa điểm có thể dùng làm trường học tạm thời',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of doctors actively working': 'Number of doctors actively working',
'Number of houses damaged, but usable': 'Number of houses damaged, but usable',
'Number of houses destroyed/uninhabitable': 'Number of houses destroyed/uninhabitable',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of latrines': 'Number of latrines',
'Number of midwives actively working': 'Number of midwives actively working',
'Number of newly admitted patients during the past 24 hours.': 'Số lượng bệnh nhân tiếp nhận trong 24h qua',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of nurses actively working': 'Number of nurses actively working',
'Number of private schools': 'Số lượng trường tư',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of schools damaged but usable': 'Number of schools damaged but usable',
'Number of schools destroyed/uninhabitable': 'Number of schools destroyed/uninhabitable',
'Number of schools open before disaster': 'Number of schools open before disaster',
'Number of schools open now': 'Number of schools open now',
'Number of teachers affected by disaster': 'Number of teachers affected by disaster',
'Number of teachers before disaster': 'Number of teachers before disaster',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Số các giường bệnh trống trong bệnh viện. Tự động cập nhật từ các báo cáo hàng ngày.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Đối tượng nam trong độ tuổi 0-5 chịu ảnh hưởng từ thiên tai',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Đối tượng là Nam giới và trong độ tuổi từ 26-60 chịu ảnh hưởng lớn từ thiên tai',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numbers Only': 'Chỉ dùng số',
'Nursery Beds': 'Nursery Beds',
'Nutrition': 'Dinh dưỡng',
'OK': 'OK',
'OR Reason': 'OR Reason',
'OR Status': 'OR Status',
'OR Status Reason': 'OR Status Reason',
'Observer': 'Observer',
'Obstetrics/Gynecology': 'Sản khoa/Phụ khoa',
'Office': 'Office',
'Office Address': 'Địa chỉ văn phòng',
'Office Details': 'Office Details',
'Office added': 'Đã thêm Văn phòng',
'Office deleted': 'Đã xóa Văn phòng',
'Office updated': 'Office updated',
'Offices': 'Offices',
'Offline Sync': 'Offline Sync',
'Offline Sync (from USB/File Backup)': 'Offline Sync (from USB/File Backup)',
'Old': 'Old',
'Older people as primary caregivers of children': 'Older people as primary caregivers of children',
'Older people in care homes': 'Older people in care homes',
'Older people participating in coping activities': 'Older people participating in coping activities',
'Older people with chronical illnesses': 'Older people with chronical illnesses',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On by default?': 'Bật theo mặc định',
'On by default? (only applicable to Overlays)': 'On by default? (only applicable to Overlays)',
'One Time Cost': 'One Time Cost',
'One time cost': 'One time cost',
'One-time': 'One-time',
'One-time costs': 'One-time costs',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! something went wrong on our side.',
'Open': 'Open',
'Open Assessment': 'Open Assessment',
'Open area': 'Open area',
'Open recent': 'Open recent',
'Operating Rooms': 'Operating Rooms',
'Optional link to an Incident which this Assessment was triggered by.': 'Optional link to an Incident which this Assessment was triggered by.',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.",
'Options': 'Tùy chọn',
'Organisation': 'Organisation',
'Organization': 'Tổ chức',
'Organization Details': 'Chi tiết Tổ chức',
'Organization Registry': 'Đăng ký tổ chức',
'Organization added': 'Organization added',
'Organization deleted': 'Organization deleted',
'Organization updated': 'Organization updated',
'Organizations': 'Tổ chức',
'Origin': 'Origin',
'Origin of the separated children': 'Origin of the separated children',
'Other': 'Other',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Evidence': 'Bằng chứng khác',
'Other Faucet/Piped Water': 'Các đường xả lũ khác',
'Other Isolation': 'Những vùng bị cô lập khác',
'Other Name': 'Other Name',
'Other activities of boys 13-17yrs': 'Các hoạt động khác của nam thanh niên từ 13-17 tuổi',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of boys <12yrs before disaster': 'Các hoạt động khác của bé trai dưới 12 tuổi trước khi xảy ra thiên tai',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Những nơi có thể dùng làm trường học tạm thời',
'Other assistance needed': 'Các hỗ trợ cần thiết',
'Other assistance, Rank': 'Những sự hỗ trợ khác,thứ hạng',
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Những yếu tố khác ảnh hưởng đến việc đến trường',
'Other major expenses': 'Other major expenses',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other side dishes in stock': 'Other side dishes in stock',
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbox': 'Outbox',
'Outgoing SMS Handler': 'Outgoing SMS Handler',
'Outgoing SMS handler': 'Outgoing SMS handler',
'Overland Flow Flood': 'Overland Flow Flood',
'Owned Resources': 'Owned Resources',
'PDAM': 'PDAM',
'PIN': 'PIN',
'PIN number ': 'PIN number ',
'PL Women': 'PL Women',
'Packet': 'Packet',
'Parameters': 'Parameters',
'Parent': 'Parent',
'Parent Office': 'Parent Office',
"Parent level should be higher than this record's level. Parent level is": "Parent level should be higher than this record's level. Parent level is",
'Parent needs to be of the correct level': 'Parent needs to be of the correct level',
'Parent needs to be set': 'Parent needs to be set',
'Parent needs to be set for locations of level': 'Parent needs to be set for locations of level',
'Parents/Caregivers missing children': 'Parents/Caregivers missing children',
'Participant': 'Participant',
'Pashto': 'Pashto',
'Passport': 'Passport',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Pathology': 'Pathology',
'Patients': 'Bệnh nhân',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Khoa Nhi',
'Peer': 'Peer',
'Peer Details': 'Peer Details',
'Peer Registration': 'Peer Registration',
'Peer Registration Details': 'Peer Registration Details',
'Peer Registration Request': 'yêu cầu đăng ký',
'Peer Type': 'Peer Type',
'Peer UID': 'Peer UID',
'Peer added': 'Peer added',
'Peer deleted': 'Peer deleted',
'Peer not allowed to push': 'Peer not allowed to push',
'Peer registration request added': 'Đã thêm yêu cầu đăng ký',
'Peer registration request deleted': 'Peer registration request deleted',
'Peer registration request updated': 'Cập nhật yêu cầu đăng ký',
'Peer updated': 'Peer updated',
'Peers': 'Peers',
'Pending Requests': 'yêu cầu đang chờ',
'People': 'People',
'People Needing Food': 'People Needing Food',
'People Needing Shelter': 'People Needing Shelter',
'People Needing Water': 'People Needing Water',
'People Trapped': 'People Trapped',
'People with chronical illnesses': 'People with chronical illnesses',
'Person': 'Cá nhân',
'Person 1': 'Person 1',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1, Person 2 are the potentially duplicate records',
'Person 2': 'Person 2',
'Person Data': 'Person Data',
'Person De-duplicator': 'Person De-duplicator',
'Person Details': 'Chi tiết cá nhân',
'Person Finder': 'Person Finder',
'Person Registry': 'Person Registry',
'Person added': 'Person added',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person interviewed': 'Person interviewed',
'Person missing': 'Person missing',
'Person reporting': 'Person reporting',
'Person who has actually seen the person/group.': 'Person who has actually seen the person/group.',
'Person who is reporting about the presence.': 'Person who is reporting about the presence.',
'Person who observed the presence (if different from reporter).': 'Người quan sát tình hình (nếu khác với phóng viên)',
'Person/Group': 'Person/Group',
'Personal Data': 'Personal Data',
'Personal Effects': 'Personal Effects',
'Personal Effects Details': 'Chi tiết ảnh hưởng cá nhân',
'Personal impact of disaster': 'Personal impact of disaster',
'Persons': 'Cá nhân',
'Persons with disability (mental)': 'Người tàn tật (về tinh thần)',
'Persons with disability (physical)': 'Người tàn tật (về thể chất)',
'Phone': 'Phone',
'Phone 1': 'Điện thoại 1',
'Phone 2': 'Điện thoại 2',
"Phone number to donate to this organization's relief efforts.": 'Số điện thoại để ủng hộ cho nỗ lực cứu trợ của tổ chức này',
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange': 'Phone/Exchange',
'Photo': 'Photo',
'Photo Details': 'Chi tiết ảnh',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'Photos',
'Physical Description': 'Physical Description',
'Picture upload and finger print upload facility': 'Picture upload and finger print upload facility',
'Place for solid waste disposal': 'Place for solid waste disposal',
'Place of Recovery': 'Place of Recovery',
'Places the children have been sent to': 'Places the children have been sent to',
'Playing': 'Playing',
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
'Please correct all errors.': 'Please correct all errors.',
'Please enter a First Name': 'Please enter a First Name',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Please enter the first few letters of the Person/Group for the autocomplete.',
'Please enter the recipient': 'Please enter the recipient',
'Please fill this!': 'Please fill this!',
'Please report here where you are:': 'Please report here where you are:',
'Please select another level': 'Please select another level',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Trường này được dùng để lưu các thông tin thêm, bao gồm lịch sử theo dõi của hồ sơ nếu nó được cập nhật.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge': 'Pledge',
'Pledge Aid': 'Pledge Aid',
'Pledge Aid to match these Requests': 'Pledge Aid to match these Requests',
'Pledge Status': 'Pledge Status',
'Pledge Support': 'Pledge Support',
'Pledged': 'Pledged',
'Pledges': 'Pledges',
'Point': 'Point',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'Police',
'Pollution and other environmental': 'Ô nhiễm và các vấn đề môi trường khác',
'Polygon': 'Polygon',
'Population': 'Population',
'Porridge': 'Cháo yến mạch',
'Port': 'Port',
'Port Closure': 'Port Closure',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position type': 'Position type',
'Position updated': 'Position updated',
'Positions': 'Positions',
'Postcode': 'Postcode',
'Poultry': 'Poultry',
'Poultry restocking, Rank': 'Thu mua gia cầm, thứ hạng',
'Pounds': 'Pounds',
'Power Failure': 'Power Failure',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Presence': 'Presence',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.",
'Previous': 'Previous',
'Primary Name': 'Primary Name',
'Priority': 'Ưu tiên',
'Priority Level': 'Priority Level',
'Private': 'Private',
'Problem': 'Problem',
'Problem Administration': 'Quản lý vấn đề',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Đã cập nhật vấn đề',
'Problems': 'Vấn đề',
'Procedure': 'Procedure',
'Procurements': 'Procurements',
'Product Description': 'Product Description',
'Product Name': 'Product Name',
'Profile': 'Profile',
'Project': 'Project',
'Project Activities': 'Các hoạt động của dự án',
'Project Details': 'Project Details',
'Project Management': 'Project Management',
'Project Status': 'Project Status',
'Project Tracking': 'Project Tracking',
'Project added': 'Dự án đã được thêm',
'Project deleted': 'Project deleted',
'Project has no Lat/Lon': 'Project has no Lat/Lon',
'Project updated': 'Project updated',
'Projection': 'Projection',
'Projection Details': 'Projection Details',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Đã cập nhật phép chiếu',
'Projections': 'Projections',
'Projects': 'Projects',
'Protected resource': 'Protected resource',
'Protection': 'Protection',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Province': 'Tỉnh/thành',
'Proxy-server': 'Proxy-server',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Khoa thần kinh/Khoa nhi',
'Public': 'Public',
'Public Event': 'Public Event',
'Public and private transportation': 'Phương tiện vận chuyển công cộng và cá nhân',
'Pull tickets from external feed': 'Pull tickets from external feed',
'Punjabi': 'Punjabi',
'Push tickets to external system': 'Push tickets to external system',
'Put a choice in the box': 'Put a choice in the box',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Núi lửa phun',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity': 'Quantity',
'Quarantine': 'Quarantine',
'Queries': 'Queries',
'Query': 'Query',
'Queryable?': 'Queryable?',
'RECORD A': 'RECORD A',
'RECORD B': 'RECORD B',
'RESPONSE': 'RESPONSE',
'Race': 'Race',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Railway Accident': 'Tại nạn đường sắt',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessment Details': 'Rapid Assessment Details',
'Rapid Assessment added': 'Rapid Assessment added',
'Rapid Assessment deleted': 'Rapid Assessment deleted',
'Rapid Assessment updated': 'Rapid Assessment updated',
'Rapid Assessments': 'Rapid Assessments',
'Rapid Assessments & Flexible Impact Assessments': 'Rapid Assessments & Flexible Impact Assessments',
'Rapid Close Lead': 'Rapid Close Lead',
'Rating Scale': 'Rating Scale',
'Raw Database access': 'Raw Database access',
'Real World Arbitrary Units': 'Real World Arbitrary Units',
'Receive': 'Receive',
'Receive Items': 'Receive Items',
'Receive Shipment': 'Receive Shipment',
'Received': 'Received',
'Received By': 'Received By',
'Received Item Details': 'Received Item Details',
'Received Item added': 'Received Item added',
'Received Item deleted': 'Received Item deleted',
'Received Item updated': 'Received Item updated',
'Received Items': 'Received Items',
'Received Items added to Warehouse Items': 'Received Items added to Warehouse Items',
'Received Shipment Details': 'Received Shipment Details',
'Received Shipment canceled': 'Received Shipment canceled',
'Received Shipment updated': 'Received Shipment updated',
'Received Shipments': 'Received Shipments',
'Recipient': 'Recipient',
'Recipients': 'Người nhận viện trợ',
'Record Details': 'Record Details',
'Record ID': 'Record ID',
'Record Saved': 'Record Saved',
'Record added': 'Hồ sơ đã được thêm',
'Record deleted': 'Record deleted',
'Record last updated': 'Record last updated',
'Record not found!': 'Record not found!',
'Record updated': 'Record updated',
'Records': 'Records',
'Recovery': 'Recovery',
'Recovery Request': 'Phục hồi yêu cầu',
'Recovery Request added': 'Đã thêm yêu cầu phục hồi',
'Recovery Request deleted': 'phục hồi các yêu cầu bị xóa',
'Recovery Request updated': 'Cập nhật Yêu cầu phục hồi',
'Recovery Requests': 'Phục hồi yêu cầu',
'Recovery report added': 'Recovery report added',
'Recovery report deleted': 'Recovery report deleted',
'Recovery report updated': 'Recovery report updated',
'Recurring': 'Định kỳ',
'Recurring Cost': 'Recurring Cost',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Chi phí định kỳ',
'Reference Document': 'Reference Document',
'Regional': 'Địa phương',
'Register': 'Register',
'Register Person': 'Đăng ký Cá nhân',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Register them as a volunteer': 'Register them as a volunteer',
'Registered People': 'Registered People',
'Registered users can': 'Người dùng đã đăng ký có thể',
'Registering ad-hoc volunteers willing to contribute': 'Registering ad-hoc volunteers willing to contribute',
'Registration': 'Registration',
'Registration Details': 'Registration Details',
'Registration added': 'Bản đăng ký đã được thêm',
'Registration entry deleted': 'Registration entry deleted',
'Registration key': 'Registration key',
'Registration updated': 'Registration updated',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Reliable access to sanitation/hygiene items': 'Reliable access to sanitation/hygiene items',
'Relief': 'Relief',
'Relief Item Catalog': 'Relief Item Catalog',
'Relief Team': 'Relief Team',
'Religion': 'Religion',
'Religious Leader': 'Religious Leader',
'Relocate as instructed in the <instruction>': 'Relocate as instructed in the <instruction>',
'Remove': 'Remove',
'Repeat your password': 'Repeat your password',
'Replace': 'Replace',
'Replace if Master': 'Replace if Master',
'Replace if Newer': 'Thay thế nếu mới hơn',
'Report': 'Report',
'Report Another Assessment...': 'Report Another Assessment...',
'Report Details': 'Report Details',
'Report Resource': 'Report Resource',
'Report Type': 'Loại báo cáo',
'Report Types Include': 'Report Types Include',
'Report a Problem with the Software': 'báo cáo lỗi bằng phần mềm',
'Report added': 'Đã thêm báo cáo',
'Report deleted': 'Đã xóa báo cáo',
'Report my location': 'Report my location',
'Report that person missing': 'Report that person missing',
'Report the contributing factors for the current EMS status.': 'Báo cáo các nhân tố đóng góp cho tình trạng EMS hiện tại.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report the person as found': 'Report the person as found',
'Report them as found': 'Report them as found',
'Report them missing': 'Report them missing',
'Report updated': 'Report updated',
'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab module not available within the running Python - this needs installing for PDF output!',
'Reporter': 'Reporter',
'Reporter Name': 'Reporter Name',
'Reporting on the projects in the region': 'Reporting on the projects in the region',
'Reports': 'Reports',
'Request': 'Yêu cầu',
'Request Added': 'Request Added',
'Request Canceled': 'Request Canceled',
'Request Details': 'Chi tiết yêu cầu',
'Request Item': 'Request Item',
'Request Item Details': 'Chi tiết yêu cầu hàng hóa',
'Request Item added': 'Đã thêm yêu cầu hàng hóa',
'Request Item deleted': 'Xóa yêu cầu hàng hóa',
'Request Item updated': 'Đã cập nhật hàng hóa yêu cầu',
'Request Items': 'Yêu cầu hàng hóa',
'Request Type': 'Loại yêu cầu',
'Request Updated': 'Request Updated',
'Request added': 'Request added',
'Request deleted': 'Request deleted',
'Request for Role Upgrade': 'yêu cầu nâng cấp vai trò',
'Request updated': 'Request updated',
'Request, Response & Session': 'Yêu cầu, Phản hồi và Tương tác',
'Requested': 'Đã yêu cầu',
'Requested By Location': 'Requested By Location',
'Requested From Warehouse': 'Requested From Warehouse',
'Requested by': 'Yêu cầu bởi',
'Requested on': 'Requested on',
'Requester': 'Requester',
'Requestor': 'Người yêu cầu',
'Requests': 'Yêu cầu',
'Requests From': 'Requests From',
'Requests for Item': 'Yêu cầu hàng hóa',
'Requires Login!': 'Requires Login!',
'Requires login': 'Requires login',
'Rescue and recovery': 'Rescue and recovery',
'Reset': 'Reset',
'Reset Password': 'Đặt lại mật khẩu',
'Reset form': 'Đặt lại mẫu',
'Resolve': 'Resolve',
'Resolve Conflict': 'Resolve Conflict',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.',
'Resource': 'Resource',
'Resource Details': 'Resource Details',
'Resource added': 'Resource added',
'Resource deleted': 'Resource deleted',
'Resource updated': 'Resource updated',
'Resources': 'Tài nguyên',
'Respiratory Infections': 'Respiratory Infections',
'Response': 'Response',
'Response Details': 'Response Details',
'Response added': 'Response added',
'Response deleted': 'Xóa phản hồi',
'Response updated': 'Response updated',
'Responses': 'Responses',
'Restricted Access': 'Restricted Access',
'Restrictions': 'Restrictions',
'Results': 'Results',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Rice': 'Rice',
'Riot': 'Riot',
'River': 'River',
'River Details': 'Chi tiết Sông',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'Rivers': 'Rivers',
'Road Accident': 'Tai nạn giao thông đường bộ',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Điều kiện lưu thông đường bộ',
'Role': 'Role',
'Role Details': 'Chi tiết vai trò',
'Role Manager': 'Role Manager',
'Role Required': 'Role Required',
'Role Updated': 'Role Updated',
'Role added': 'Role added',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Role-based': 'Role-based',
'Roles': 'Roles',
'Roles Permitted': 'Roles Permitted',
'Roof tile': 'Roof tile',
'Row Choices (One Per Line)': 'Row Choices (One Per Line)',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Run Functional Tests': 'Kiểm thử chức năng',
'Run Interval': 'Run Interval',
'Running Cost': 'Running Cost',
'SITUATION': 'SITUATION',
'Safe environment for vulnerable groups': 'Safe environment for vulnerable groups',
'Safety of children and women affected by disaster': 'Safety of children and women affected by disaster',
'Sahana Administrator': 'Quản trị viên Sahana',
'Sahana Agasti': 'Sahana Agasti',
'Sahana Blue': 'Sahana Blue',
'Sahana Community Chat': 'Sahana Community Chat',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> Other',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)',
'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden',
'Sahana Eden Disaster Management Platform': 'Sahana Eden Disaster Management Platform',
'Sahana Eden Open Source Disaster Management Platform': 'Sahana Eden Open Source Disaster Management Platform',
'Sahana Eden Website': 'Website Sahana Eden',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.',
'Sahana FOSS Disaster Management System': 'Sahana FOSS Disaster Management System',
'Sahana Green': 'Sahana Green',
'Sahana Login Approval Pending': 'Chờ chấp nhận đăng nhập vào Sahana',
'Sahana Steel': 'Thép Sahana',
'Sahana access granted': 'Sahana access granted',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: new request has been made. Please login to see if you can fulfil the request.',
'Salted Fish': 'Salted Fish',
'Salvage material usable from destroyed houses': 'Salvage material usable from destroyed houses',
'Salvage material usable from destroyed schools': 'Salvage material usable from destroyed schools',
'Sanitation problems': 'Sanitation problems',
'Satellite': 'Vệ tinh',
'Satellite Office': 'Satellite Office',
'Saturday': 'Saturday',
'Save': 'Lưu',
'Save any Changes in the one you wish to keep': 'Lưu mọi thay đổi ở bất kỳ nơi nào bạn muốn',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Scale of Results': 'Phạm vi của kết quả',
'Schedule': 'Lịch trình',
'School': 'School',
'School Closure': 'School Closure',
'School Lockdown': 'School Lockdown',
'School Reports': 'School Reports',
'School Teacher': 'School Teacher',
'School assistance received/expected': 'School assistance received/expected',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School/studying': 'School/studying',
'Schools': 'Schools',
'Search': 'Tìm kiếm',
'Search & List Bin Types': 'Search & List Bin Types',
'Search & List Bins': 'Search & List Bins',
'Search & List Catalog': 'Tìm kiếm và liệt kê các danh mục',
'Search & List Category': 'Tìm và liệt kê danh mục',
'Search & List Items': 'Tìm kiếm và hiển thị danh sách hàng hóa',
'Search & List Locations': 'Tìm và liệt kê các địa điểm',
'Search & List Site': 'Search & List Site',
'Search & List Sub-Category': 'Tìm kiếm và lên danh sách Tiêu chí phụ',
'Search & List Unit': 'Search & List Unit',
'Search Activities': 'Tìm kiếm các hoạt động',
'Search Activity Report': 'Search Activity Report',
'Search Addresses': 'Search Addresses',
'Search Aid Requests': 'Tìm kiếm Yêu cầu cứu trợ',
'Search Assessment Summaries': 'Search Assessment Summaries',
'Search Assessments': 'Tìm kiếm các đánh giá',
'Search Baseline Type': 'Search Baseline Type',
'Search Baselines': 'Search Baselines',
'Search Budgets': 'Tìm kiếm các ngân sách',
'Search Bundles': 'Search Bundles',
'Search Catalog Items': 'Search Catalog Items',
'Search Category<>Sub-Category<>Catalog Relation': 'Search Category<>Sub-Category<>Catalog Relation',
'Search Checklists': 'Tìm kiếm Checklist',
'Search Cluster Subsectors': 'Search Cluster Subsectors',
'Search Clusters': 'Search Clusters',
'Search Configs': 'Search Configs',
'Search Contact Information': 'Tìm thông tin liên lạc',
'Search Contacts': 'Tìm kiếm các đầu mối liên lạc',
'Search Distribution Items': 'Search Distribution Items',
'Search Distributions': 'Tìm kiếm Quyên góp',
'Search Documents': 'Tìm kiếm các tài liệu',
'Search Donors': 'Tìm kiếm những người ủng hộ',
'Search Feature Class': 'Search Feature Class',
'Search Feature Layers': 'Tìm kiếm Layer chức năng',
'Search Flood Reports': 'Tìm các báo cáo về lũ lụt',
'Search Groups': 'Search Groups',
'Search Hospitals': 'Tìm kếm các bệnh viện',
'Search Identity': 'Search thông tin nhận dạng',
'Search Images': 'Tìm kếm các ảnh',
'Search Impact Type': 'Search Impact Type',
'Search Impacts': 'Search Impacts',
'Search Incident Reports': 'Search Incident Reports',
'Search Incidents': 'Search Incidents',
'Search Inventory Items': 'Search Inventory Items',
'Search Inventory Stores': 'Search Inventory Stores',
'Search Item Catalog Category(s)': 'Search Item Catalog Category(s)',
'Search Item Catalog(s)': 'Tìm kiếm Catalog hàng hóa',
'Search Item Categories': 'Search Item Categories',
'Search Item Packets': 'Search Item Packets',
'Search Item Sub-Category(s)': 'Search Item Sub-Category(s)',
'Search Items': 'Search Items',
'Search Keys': 'Tìm kiếm mã',
'Search Kits': 'Search Kits',
'Search Layers': 'Tìm kiếm các lớp',
'Search Locations': 'Tìm kiếm các địa điểm',
'Search Log Entry': 'Search Log Entry',
'Search Markers': 'Search Markers',
'Search Member': 'Tìm thành viên',
'Search Membership': 'Tìm kiếm thành viên',
'Search Memberships': 'Tim kiếm thành viên',
'Search Metadata': 'Tìm kiếm dữ liệu',
'Search Need Type': 'Search Need Type',
'Search Needs': 'Search Needs',
'Search Offices': 'Tìm các văn phòng',
'Search Organizations': 'Tìm kiếm các tổ chức',
'Search Peer': 'Search Peer',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Tìm kiếm Cá nhân',
'Search Photos': 'Tìm kiếm ảnh',
'Search Positions': 'Search Positions',
'Search Problems': 'Search Problems',
'Search Projections': 'Search Projections',
'Search Projects': 'Tìm kiếm các dự án',
'Search Rapid Assessments': 'Search Rapid Assessments',
'Search Received Items': 'Search Received Items',
'Search Received Shipments': 'Search Received Shipments',
'Search Records': 'Tìm các hồ sơ',
'Search Recovery Reports': 'Search Recovery Reports',
'Search Registations': 'Tìm kiếm các đăng ký',
'Search Registration Request': 'Tìm kiếm Yêu cầu Đăng ký',
'Search Report': 'Tìm kiếm báo cáo',
'Search Reports': 'Tìm kiếm Báo cáo',
'Search Request': 'Tìm kiếm yêu cầu',
'Search Request Items': 'Tìm kiếm Yêu cầu hàng hóa',
'Search Requests': 'Search Requests',
'Search Resources': 'Tìm kiếm các nguồn lực',
'Search Responses': 'Search Responses',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Tìm các vai trò',
'Search Sections': 'Search Sections',
'Search Sectors': 'Search Sectors',
'Search Sent Items': 'Search Sent Items',
'Search Sent Shipments': 'Search Sent Shipments',
'Search Service Profiles': 'Search Service Profiles',
'Search Settings': 'Search Settings',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Types': 'Tìm kiếm Loại Cư trú',
'Search Shelters': 'Search Shelters',
'Search Shipment Transit Logs': 'Search Shipment Transit Logs',
'Search Shipment/Way Bills': 'Search Shipment/Way Bills',
'Search Shipment<>Item Relation': 'Search Shipment<>Item Relation',
'Search Site(s)': 'Search Site(s)',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Staff': 'Search Staff',
'Search Staff Types': 'Search Staff Types',
'Search Status': 'Search Status',
'Search Storage Bin Type(s)': 'Search Storage Bin Type(s)',
'Search Storage Bin(s)': 'Search Storage Bin(s)',
'Search Storage Location(s)': 'Tìm kiếm kho lưu trữ',
'Search Subscriptions': 'Tìm kiếm danh sách, số tiền quyên góp',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Tìm kiếm các đội',
'Search Themes': 'Tìm kiếm chủ đề',
'Search Tickets': 'Search Tickets',
'Search Tracks': 'Tìm kiếm dấu vết',
'Search Twitter Tags': 'Search Twitter Tags',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Volunteer Registrations': 'Tìm kiếm Đăng ký tình nguyện viên',
'Search Volunteers': 'Search Volunteers',
'Search Warehouse Items': 'Search Warehouse Items',
'Search Warehouses': 'Search Warehouses',
'Search and Edit Group': 'Tìm và sửa thông tin nhóm',
'Search and Edit Individual': 'Tìm kiếm và chỉnh sửa cá nhân',
'Search by ID Tag': 'Search by ID Tag',
'Search by Skill Types': 'Search by Skill Types',
'Search for Items': 'Search for Items',
'Search for a Hospital': 'Tìm kiếm bệnh viện',
'Search for a Location': 'Tìm một địa điểm',
'Search for a Person': 'Tìm kiếm một người',
'Search for a Project': 'Tìm kiếm dự án',
'Search for a Request': 'Tìm kiếm một yêu cầu',
'Search here for a person in order to:': 'Search here for a person in order to:',
"Search here for a person's record in order to:": "Search here for a person's record in order to:",
'Search messages': 'Search messages',
'Searching for different groups and individuals': 'Searching for different groups and individuals',
'Secondary Server (Optional)': 'Secondary Server (Optional)',
'Seconds must be a number between 0 and 60': 'Giây phải là số từ 0 đến 60',
'Section Details': 'Chi tiết khu vực',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Sections': 'Sections',
'Sector': 'Sector',
'Sector Details': 'Sector Details',
'Sector added': 'Sector added',
'Sector deleted': 'Sector deleted',
'Sector updated': 'Sector updated',
'Sectors': 'Sectors',
'Security Policy': 'Chính sách bảo mật',
'Security Status': 'Security Status',
'Security problems': 'Security problems',
'Seen': 'Seen',
'Select 2 potential locations from the dropdowns.': 'Select 2 potential locations from the dropdowns.',
'Select Photos': 'Select Photos',
'Select a location': 'Select a location',
"Select a person in charge for status 'assigned'": "Select a person in charge for status 'assigned'",
'Select a question from the list': 'Chọn một câu hỏi trong danh sách',
'Select all that apply': 'Chọn tất cả các áp dụng trên',
'Select an Organization to see a list of offices': 'Select an Organization to see a list of offices',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
'Select the person associated with this scenario.': 'Select the person associated with this scenario.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS',
'Self Registration': 'Self Registration',
'Self-registration': 'Self-registration',
'Send': 'Send',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send Mail': 'Send Mail',
'Send Notification': 'Send Notification',
'Send Shipment': 'Send Shipment',
'Send message': 'Gửi tin nhắn',
'Send new message': 'Gửi tin nhắn mới',
'Sends & Receives Alerts via Email & SMS': 'Sends & Receives Alerts via Email & SMS',
'Senior (50+)': 'Senior (50+)',
'Sensitivity': 'Mức độ nhạy cảm',
'Sent': 'Sent',
'Sent Item': 'Sent Item',
'Sent Item Details': 'Sent Item Details',
'Sent Item added': 'Sent Item added',
'Sent Item deleted': 'Sent Item deleted',
'Sent Item updated': 'Sent Item updated',
'Sent Items': 'Sent Items',
'Sent Shipment Details': 'Sent Shipment Details',
'Sent Shipment canceled': 'Sent Shipment canceled',
'Sent Shipment updated': 'Sent Shipment updated',
'Sent Shipments': 'Sent Shipments',
'Separate latrines for women and men': 'Separate latrines for women and men',
'Seraiki': 'Seraiki',
'Series': 'Series',
'Server': 'Server',
'Service': 'Service',
'Service Catalogue': 'Service Catalogue',
'Service or Facility': 'Dịch vụ hoặc phương tiện',
'Service profile added': 'Đã thêm thông tin dịch vụ',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Services': 'Dịch vụ',
'Services Available': 'Các dịch vụ đang triển khai',
'Setting Details': 'Setting Details',
'Setting added': 'Đã thêm cài đặt',
'Setting deleted': 'Setting deleted',
'Setting updated': 'Setting updated',
'Settings': 'Cài đặt',
'Settings updated': 'Settings updated',
'Settings were reset because authenticating with Twitter failed': 'Settings were reset because authenticating with Twitter failed',
'Severity': 'Severity',
'Severity:': 'Severity:',
'Share a common Marker (unless over-ridden at the Feature level)': 'Chia sẻ Đèn hiệu chung(nếu không vượt mức tính năng)',
'Shelter': 'Cư trú',
'Shelter & Essential NFIs': 'Shelter & Essential NFIs',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Registry': 'Đăng ký tạm trú',
'Shelter Service': 'Shelter Service',
'Shelter Service Details': 'Chi tiết dịch vụ cư trú',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Services': 'Dịch vụ cư trú',
'Shelter Type': 'Shelter Type',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Types': 'Shelter Types',
'Shelter Types and Services': 'Shelter Types and Services',
'Shelter added': 'Đã thêm Thông tin cư trú',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelter/NFI assistance received/expected': 'Shelter/NFI assistance received/expected',
'Shelters': 'Shelters',
'Shipment Received': 'Shipment Received',
'Shipment Sent': 'Shipment Sent',
'Shipment Transit Log Details': 'Shipment Transit Log Details',
'Shipment Transit Log added': 'Shipment Transit Log added',
'Shipment Transit Log deleted': 'Shipment Transit Log deleted',
'Shipment Transit Log updated': 'Shipment Transit Log updated',
'Shipment Transit Logs': 'Shipment Transit Logs',
'Shipment/Way Bill added': 'Shipment/Way Bill added',
'Shipment/Way Bills': 'Shipment/Way Bills',
'Shipment/Way Bills Details': 'Shipment/Way Bills Details',
'Shipment/Way Bills deleted': 'Shipment/Way Bills deleted',
'Shipment/Way Bills updated': 'Shipment/Way Bills updated',
'Shipment<>Item Relation added': 'Shipment<>Item Relation added',
'Shipment<>Item Relation deleted': 'Shipment<>Item Relation deleted',
'Shipment<>Item Relation updated': 'Shipment<>Item Relation updated',
'Shipment<>Item Relations': 'Shipment<>Item Relations',
'Shipment<>Item Relations Details': 'Shipment<>Item Relations Details',
'Shipments': 'Shipments',
'Shipments To': 'Shipments To',
'Shooting': 'Shooting',
'Short Assessment': 'Short Assessment',
'Short Description': 'Short Description',
'Show Checklist': 'Show Checklist',
'Show on map': 'Hiển thị trên bản đồ',
'Sindhi': 'Sindhi',
'Site': 'Địa điểm',
'Site Address': 'Site Address',
'Site Administration': 'Quản trị Site',
'Site Description': 'Site Description',
'Site Details': 'Site Details',
'Site ID': 'Site ID',
'Site Location Description': 'Site Location Description',
'Site Location Name': 'Site Location Name',
'Site Manager': 'Site Manager',
'Site Name': 'Site Name',
'Site added': 'Site added',
'Site deleted': 'Site deleted',
'Site updated': 'Site updated',
'Site/Warehouse': 'Site/Warehouse',
'Sites': 'Trang web',
'Situation Awareness & Geospatial Analysis': 'Nhận biết tình huống và phân tích tọa độ địa lý',
'Sketch': 'Sketch',
'Skill': 'Skill',
'Skill Details': 'Chi tiết kỹ năng',
'Skill Status': 'Skill Status',
'Skill Type Details': 'Skill Type Details',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Types': 'Skill Types',
'Skill added': 'Đã thêm kỹ năng',
'Skill deleted': 'Skill deleted',
'Skill updated': 'Skill updated',
'Skills': 'Skills',
'Skype ID': 'Skype ID',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Solid waste': 'Solid waste',
'Solution': 'Solution',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Đã xóa giải pháp',
'Solution updated': 'Solution updated',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry - the server has a problem, please try again later.': 'Sorry - the server has a problem, please try again later.',
'Sorry that location appears to be outside the area of the Parent.': 'Sorry that location appears to be outside the area of the Parent.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Sorry that location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Xin lỗi, tôi không hiểu yêu cầu của bạn',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Xin lỗi, trang này bị cấm vì một số lý do',
'Sorry, that service is temporary unavailable.': 'Xin lỗi, dịch vụ đó tạm thời không hoạt động',
'Sorry, there are no addresses to display': 'Sorry, there are no addresses to display',
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": 'Xin lỗi, chúng tôi không tìm thấy trang đó',
'Source': 'Source',
'Source ID': 'Source ID',
'Source Time': 'Source Time',
'Source Type': 'Source Type',
'Space Debris': 'Space Debris',
'Spanish': 'Người Tây Ban Nha',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Special needs': 'Nhu cầu đặc biệt',
'Specialized Hospital': 'Bệnh viện chuyên khoa',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.',
'Specific locations need to have a parent of level': 'Specific locations need to have a parent of level',
'Specify a descriptive title for the image.': 'Specify a descriptive title for the image.',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the minimum sustainability in weeks or days.': 'Specify the minimum sustainability in weeks or days.',
'Specify the number of available sets': 'Specify the number of available sets',
'Specify the number of available units (adult doses)': 'Specify the number of available units (adult doses)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions',
'Specify the number of sets needed per 24h': 'Specify the number of sets needed per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specify the number of units (adult doses) needed per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h',
'Spherical Mercator?': 'Spherical Mercator?',
'Spreadsheet Importer': 'Spreadsheet Importer',
'Spreadsheet uploaded': 'Spreadsheet uploaded',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff': 'Staff',
'Staff 2': 'Staff 2',
'Staff Details': 'Staff Details',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff added': 'Staff added',
'Staff deleted': 'Xóa tên nhân viên',
'Staff present and caring for residents': 'Staff present and caring for residents',
'Staff updated': 'Staff updated',
'Staffing': 'Staffing',
'Start date': 'Ngày bắt đầu',
'Start of Period': 'Start of Period',
'State': 'State',
'Stationery': 'Stationery',
'Status': 'Status',
'Status Report': 'Status Report',
'Status added': 'Status added',
'Status deleted': 'Status deleted',
'Status of clinical operation of the facility.': 'Status of clinical operation of the facility.',
'Status of general operation of the facility.': 'Status of general operation of the facility.',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations of the emergency department of this hospital.': 'Tình trạng hoạt động của phòng cấp cứu tại bệnh viện này',
'Status of security procedures/access restrictions in the hospital.': 'Trạng thái của các giới hạn thủ tục/truy nhập an ninh trong bệnh viện',
'Status of the operating rooms of this hospital.': 'Trạng thái các phòng bệnh trong bệnh viện này',
'Status updated': 'Status updated',
'Storage Bin': 'Storage Bin',
'Storage Bin Details': 'Storage Bin Details',
'Storage Bin Number': 'Storage Bin Number',
'Storage Bin Type': 'Storage Bin Type',
'Storage Bin Type Details': 'Storage Bin Type Details',
'Storage Bin Type added': 'Storage Bin Type added',
'Storage Bin Type deleted': 'Storage Bin Type deleted',
'Storage Bin Type updated': 'Storage Bin Type updated',
'Storage Bin Types': 'Storage Bin Types',
'Storage Bin added': 'Storage Bin added',
'Storage Bin deleted': 'Storage Bin deleted',
'Storage Bin updated': 'Storage Bin updated',
'Storage Bins': 'Storage Bins',
'Storage Location': 'Storage Location',
'Storage Location Details': 'Storage Location Details',
'Storage Location ID': 'Storage Location ID',
'Storage Location Name': 'Storage Location Name',
'Storage Location added': 'Storage Location added',
'Storage Location deleted': 'Storage Location deleted',
'Storage Location updated': 'Storage Location updated',
'Storage Locations': 'Storage Locations',
'Store spreadsheets in the Eden database': 'Store spreadsheets in the Eden database',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Street': 'Street',
'Street (continued)': 'Street (continued)',
'Street Address': 'Street Address',
'Strong Wind': 'Strong Wind',
'Sub Category': 'Sub Category',
'Sub-type': 'Sub-type',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submission successful - please wait...': 'Submission successful - please wait...',
'Subscription Details': 'Subscription Details',
'Subscription added': 'Subscription added',
'Subscription deleted': 'Subscription deleted',
'Subscription updated': 'Subscription updated',
'Subscriptions': 'Quyên góp',
'Subsistence Cost': 'Mức sống tối thiểu',
'Sufficient care/assistance for chronically ill': 'Sufficient care/assistance for chronically ill',
'Suggest not changing this field unless you know what you are doing.': 'Khuyến nghị bạn không thay đổi trường này khi chưa chắc chắn',
'Summary': 'Summary',
'Sunday': 'Sunday',
'Support Request': 'Hỗ trợ yêu cầu',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.',
'Sure you want to delete this object?': 'Sure you want to delete this object?',
'Surgery': 'Surgery',
'Survey Answer': 'Survey Answer',
'Survey Answer Details': 'Survey Answer Details',
'Survey Answer added': 'Trả lời khảo sát đã được thêm',
'Survey Answer deleted': 'Survey Answer deleted',
'Survey Answer updated': 'Survey Answer updated',
'Survey Module': 'Survey Module',
'Survey Name': 'Tên khảo sát',
'Survey Question': 'Survey Question',
'Survey Question Details': 'Survey Question Details',
'Survey Question Display Name': 'Tên trên bảng câu hỏi khảo sát',
'Survey Question added': 'Survey Question added',
'Survey Question deleted': 'Survey Question deleted',
'Survey Question updated': 'Survey Question updated',
'Survey Section': 'Survey Section',
'Survey Section Details': 'Survey Section Details',
'Survey Section Display Name': 'Survey Section Display Name',
'Survey Section added': 'Đã thêm khu vực khảo sát',
'Survey Section deleted': 'Survey Section deleted',
'Survey Section updated': 'Cập nhật khu vực khảo sát',
'Survey Series': 'Survey Series',
'Survey Series Details': 'Survey Series Details',
'Survey Series Name': 'Survey Series Name',
'Survey Series added': 'Survey Series added',
'Survey Series deleted': 'Survey Series deleted',
'Survey Series updated': 'Đã cập nhật serie khảo sát',
'Survey Template': 'Survey Template',
'Survey Template Details': 'Survey Template Details',
'Survey Template added': 'Thêm mẫu Khảo sát',
'Survey Template deleted': 'Survey Template deleted',
'Survey Template updated': 'Survey Template updated',
'Survey Templates': 'Survey Templates',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': 'Switch this on to use individual CSS/Javascript files for diagnostics during development.',
'Symbology': 'Symbology',
'Sync Conflicts': 'Sync Conflicts',
'Sync History': 'Sync History',
'Sync Now': 'Đồng bộ hóa ngay bây giờ',
'Sync Partners': 'Sync Partners',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.',
'Sync Pools': 'Sync Pools',
'Sync Schedule': 'Sync Schedule',
'Sync Settings': 'Sync Settings',
'Sync process already started on ': 'Sync process already started on ',
'Synchronisation': 'Synchronisation',
'Synchronization': 'Synchronization',
'Synchronization Conflicts': 'Synchronization Conflicts',
'Synchronization Details': 'Synchronization Details',
'Synchronization History': 'Synchronization History',
'Synchronization Peers': 'Synchronization Peers',
'Synchronization Settings': 'Synchronization Settings',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden',
'Synchronization not configured.': 'Synchronization not configured.',
'Synchronization settings updated': 'Synchronization settings updated',
'Syncronisation History': 'Lịch sử đồng bộ hóa',
'System allows the General Public to Report Incidents & have these Tracked.': 'System allows the General Public to Report Incidents & have these Tracked.',
'System allows the tracking & discovery of Items stored in Locations.': 'System allows the tracking & discovery of Items stored in Locations.',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Hệ thống luôn theo sát quá trình làm việc của tất cả các tình nguyện viên trong khu vực bị thiên tai.Hệ thống nắm bắt không chỉ vị trí hoạt động của họ mà còn cả thông tin về các dịch vụ mà họ đang cung cấp ở mỗi khu vực.',
"System's Twitter account updated": 'Cập nhật tài khoản Twitter của hệ thống',
'Table name': 'Table name',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Take shelter in place or per <instruction>',
'Task Details': 'Task Details',
'Task List': 'Task List',
'Task Status': 'Task Status',
'Task added': 'Đã thêm Nhiệm vụ',
'Task deleted': 'Đã xóa Nhiệm vụ',
'Task status': 'Task status',
'Task updated': 'Đã cập nhật nhiệm vụ',
'Tasks': 'Tasks',
'Team': 'Team',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team Head': 'Team Head',
'Team Id': 'Team Id',
'Team Leader': 'Đội trưởng',
'Team Member added': 'Thành viên đội đã được thêm',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Loại Đội',
'Team added': 'Đội đã được thêm',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Teams': 'Teams',
'Technical testing only, all recipients disregard': 'Technical testing only, all recipients disregard',
'Telecommunications': 'Telecommunications',
'Telephone': 'Telephone',
'Telephony': 'Đường điện thoại',
'Temp folder %s not writable - unable to apply theme!': 'Temp folder %s not writable - unable to apply theme!',
'Template file %s not readable - unable to apply theme!': 'Template file %s not readable - unable to apply theme!',
'Templates': 'Templates',
'Terrorism': 'Terrorism',
'Tertiary Server (Optional)': 'Tertiary Server (Optional)',
'Test Results': 'Test Results',
'Text': 'Văn bản',
'Text Colour for Text blocks': 'Màu vản bản cho khối văn bản',
'Text before each Text Field (One per line)': 'Text before each Text Field (One per line)',
'Text in Message': 'Text in Message',
'Text in Message: ': 'Text in Message: ',
'Thanks for your assistance': 'Thanks for your assistance',
'The': 'The',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.',
'The Area which this Site is located within.': 'Xác định khu vực site này định vị trong đó',
'The Assessments module allows field workers to send in assessments.': 'The Assessments module allows field workers to send in assessments.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The District for this Report.': 'The District for this Report.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.",
'The Group whose members can edit data in this record.': 'The Group whose members can edit data in this record.',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Office this record is associated with.': 'The Office this record is associated with.',
'The Organization this record is associated with.': 'The Organization this record is associated with.',
'The Organization which is funding this Activity.': 'The Organization which is funding this Activity.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'The Rapid Assessments Module stores structured reports done by Professional Organizations.',
'The Request this record is associated with.': 'The Request this record is associated with.',
'The Role this person plays within this Office/Project.': 'The Role this person plays within this Office/Project.',
'The Role this person plays within this hospital.': 'Vai trò của người này trong bệnh viện',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": "The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.",
'The Shelter this Request is from (optional).': 'The Shelter this Request is from (optional).',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
'The URL of your web gateway without the post parameters': 'The URL of your web gateway without the post parameters',
'The URL to access the service.': 'The URL to access the service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The category of the Item.': 'The category of the Item.',
'The contact person for this organization.': 'Người chịu trách nhiệm liên lạc cho tổ chức này',
'The country the person usually lives in.': 'The country the person usually lives in.',
'The duplicate record will be deleted': 'The duplicate record will be deleted',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': 'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The following modules are available': 'The following modules are available',
'The hospital this record is associated with.': 'Bệnh viện lưu hồ sơ này',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.',
'The language to use for notifications.': 'The language to use for notifications.',
'The last known location of the missing person before disappearance.': 'The last known location of the missing person before disappearance.',
'The list of Item categories are maintained by the Administrators.': 'Danh sách category hàng hóa được quản trị viên quản lý',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The person at the location who is reporting this incident (optional)': 'The person at the location who is reporting this incident (optional)',
'The person reporting about the missing person.': 'Người báo cáo về người mất tích',
'The person reporting the missing person.': 'The person reporting the missing person.',
"The person's manager within this Office/Project.": 'Quản lý của một cá nhân trong Văn phòng/Dự án',
'The post variable containing the phone number': 'The post variable containing the phone number',
'The post variable on the URL used for sending messages': 'Bài viết thay đổi trên URL dùng để gửi tin nhắn',
'The post variables other than the ones containing the message and the phone number': 'The post variables other than the ones containing the message and the phone number',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'Các chính sách đơn giản cho phép người dùng ẩn danh đọc và đăng ký để chỉnh sửa. Các chính sách bảo mật đầy đủ cho phép quản trị viên thiết lập phân quyền trên các bảng cá nhân hay - xem mô hình / zzz.py.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>',
'The title of the WMS Browser panel in the Tools panel.': 'The title of the WMS Browser panel in the Tools panel.',
'The token associated with this application on': 'The token associated with this application on',
'The unique identifier which identifies this instance to other instances.': 'The unique identifier which identifies this instance to other instances.',
'The weight in kg.': 'The weight in kg.',
'Theme': 'Chủ đề',
'Theme Details': 'Theme Details',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme updated': 'Theme updated',
'Themes': 'Themes',
'There are errors': 'There are errors',
'There was a problem, sorry, please try again later.': 'There was a problem, sorry, please try again later.',
'These are settings for Inbound Mail.': 'Đây là những cài đặt cho thư gửi vào',
'These are the Incident Categories visible to normal End-Users': 'These are the Incident Categories visible to normal End-Users',
'These are the default settings for all users. To change settings just for you, click ': 'These are the default settings for all users. To change settings just for you, click ',
'They': 'Người ta',
'This appears to be a duplicate of ': 'This appears to be a duplicate of ',
'This file already exists on the server as': 'This file already exists on the server as',
'This form allows the administrator to remove a duplicate location.': 'Mẫu này cho phép quản trị viên xóa bỏ các địa điểm trùng',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Đây là cách truyền dữ liệu giữa các máy vì nó bảo toàn tham chiếu',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!',
'This might be due to a temporary overloading or maintenance of the server.': 'Vấn đề này có thể do tình trạng quá tải hoặc máy chủ đang trong thời gian bảo trì',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'This page shows you logs of past syncs. Click on the link below to go to this page.',
'This screen allows you to upload a collection of photos to the server.': 'Màn hình cho phép bạn upload bộ sưu ảnh lên server',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket': 'Ticket',
'Ticket Details': 'Chi tiết Ticket',
'Ticket added': 'Ticket added',
'Ticket deleted': 'Đã xóa Ticket',
'Ticket updated': 'Ticket updated',
'Ticketing Module': 'Ticketing Module',
'Tickets': 'Tickets',
'Time needed to collect water': 'Time needed to collect water',
'Time of Request': 'Thời gian yêu cầu',
'Timestamp': 'Timestamp',
'Title': 'Title',
'To Location': 'To Location',
'To begin the sync process, click the button on the right => ': 'Nhấp chuột vào nút bên phải để kích hoạt quá trình đồng bộ',
'To begin the sync process, click this button => ': 'To begin the sync process, click this button => ',
'To delete': 'To delete',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": 'Để tìm kiếm một bệnh viện, nhập một phần tên hoặc ID. Có thể sử dụng % như một ký tự thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin, sẽ hiển thị toàn bộ các bệnh viện.',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": 'Để tìm kiếm một địa điểm, nhập tên. Có thể sử dụng ký tự % để thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin sẽ hiển thị tất cả các địa điểm.',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'Để tìm kiếm một người,bạn có thể nhập tên, tên đệm hay họ và/hoặc số chứng minh thư của người đó viết cách nhau.Bạn có thể dùng % a làm ký tự đại diện',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.",
'To submit a new job, use the': 'To submit a new job, use the',
'To variable': 'Thay đổi',
'Tools': 'Tools',
'Tornado': 'Lốc xoáy',
'Total # of Beneficiaries Reached ': 'Total # of Beneficiaries Reached ',
'Total # of Target Beneficiaries': 'Tổng số # đối tượng hưởng lợi',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Beds': 'Total Beds',
'Total Beneficiaries': 'Total Beneficiaries',
'Total Cost per Megabyte': 'Tổng chi phí cho mỗi Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Households': 'Total Households',
'Total Monthly': 'Total Monthly',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly Cost: ': 'Total Monthly Cost: ',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Recurring Costs': 'Tổng chi phí định kỳ',
'Total Unit Cost': 'Total Unit Cost',
'Total Unit Cost: ': 'Total Unit Cost: ',
'Total Units': 'Total Units',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Tổng số giường bệnh trong bệnh viện này. Tự động cập nhật từ các báo cáo hàng ngày.',
'Total number of houses in the area': 'Tổng số nóc nhà trong khu vực',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tourist Group': 'Tourist Group',
'Town': 'Town',
'Traces internally displaced people (IDPs) and their needs': 'Traces internally displaced people (IDPs) and their needs',
'Tracing': 'Đang tìm kiếm',
'Track': 'Dấu vết',
'Track Details': 'Track Details',
'Track deleted': 'Track deleted',
'Track updated': 'Track updated',
'Track uploaded': 'Track uploaded',
'Tracking of Projects, Activities and Tasks': 'Tracking of Projects, Activities and Tasks',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracking of basic information on the location, facilities and size of the Shelters',
'Tracks': 'Tracks',
'Tracks requests for aid and matches them against donors who have pledged aid': 'Tracks requests for aid and matches them against donors who have pledged aid',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Tracks the location, distibution, capacity and breakdown of victims in Shelters',
'Traffic Report': 'Traffic Report',
'Transit': 'Transit',
'Transition Effect': 'Transition Effect',
'Transparent?': 'Transparent?',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tree': 'Tree',
'Tropical Storm': 'Tropical Storm',
'Tropo Messaging Token': 'Tropo Messaging Token',
'Tropo Settings': 'Tropo Settings',
'Tropo Voice Token': 'Tropo Voice Token',
'Tropo settings updated': 'Cập nhật cài đặt Tropo',
'Truck': 'Xe tải',
'Try checking the URL for errors, maybe it was mistyped.': 'Thử kiểm tra lỗi trên URL, có thể do gõ sai',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Thử bấm nút refresh/reload hoặc kiểm tra URL',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID or #hashtag',
'Twitter Settings': 'Twitter Settings',
'Type': 'Type',
'Type of cause': 'Type of cause',
'Type of latrines': 'Type of latrines',
'Type of place for defecation': 'Type of place for defecation',
'Type of water source before the disaster': 'Type of water source before the disaster',
'Types of health services available': 'Types of health services available',
'Types of water storage containers available': 'Types of water storage containers available',
'UID': 'UID',
'URL': 'URL',
'UTC Offset': 'UTC Offset',
'Unable to parse CSV file!': 'Không thể đọc file CSV',
'Understaffed': 'Understaffed',
'Unidentified': 'Unidentified',
'Unit': 'Unit',
'Unit Bed Capacity': 'Unit Bed Capacity',
'Unit Cost': 'Unit Cost',
'Unit Details': 'Unit Details',
'Unit Name': 'Unit Name',
'Unit Set': 'Unit Set',
'Unit Short Code for e.g. m for meter.': 'Unit Short Code for e.g. m for meter.',
'Unit added': 'Đã thêm đơn vị',
'Unit deleted': 'Unit deleted',
'Unit updated': 'Đơn vị được cập nhật',
'Units': 'Units',
'Units of Measure': 'Units of Measure',
'Unknown': 'Unknown',
'Unknown Peer': 'Unknown Peer',
'Unknown type of facility': 'Unknown type of facility',
'Unresolved Conflicts': 'Unresolved Conflicts',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unsent': 'Unsent',
'Unsupported data format!': 'Unsupported data format!',
'Unsupported method!': 'Unsupported method!',
'Update': 'Update',
'Update Activity Report': 'Update Activity Report',
'Update Cholera Treatment Capability Information': 'Update Cholera Treatment Capability Information',
'Update Import Job': 'Update Import Job',
'Update Request': 'Cập nhật Yêu cầu',
'Update Service Profile': 'Update Service Profile',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update if Master': 'Update if Master',
'Update if Newer': 'Cập nhật nếu mới hơn',
'Update your current ordered list': 'Update your current ordered list',
'Upload': 'Upload',
'Upload Photos': 'Upload Photos',
'Upload Spreadsheet': 'Upload Spreadsheet',
'Upload Track': 'Upload Track',
'Upload a Spreadsheet': 'Tải một bảng tính lên',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
'Urban Fire': 'Urban Fire',
'Urban area': 'Urban area',
'Urdu': 'Urdu',
'Urgent': 'Urgent',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Dùng (...)&(...) thay cho VÀ(AND), (...)|(...) cho HOẶC(OR), and ~(...) cho KHÔNG (NOT) để .đưa ra những câu hỏi phức tạp',
'Use default': 'Use default',
'Use these links to download data that is currently in the database.': 'Dùng liên kết này để tải dữ liệu hiện có trên cơ sở dữ liệu xuống',
'Use this space to add a description about the Bin Type.': 'Thêm thông tin mô tả loại Bin ở đây',
'Use this space to add a description about the site location.': 'Use this space to add a description about the site location.',
'Use this space to add a description about the warehouse/site.': 'Thêm mô tả nhà kho/site ở đây',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Viết bình luận và ghi chú về site/nhà kho ở đây',
'Used to import data from spreadsheets into the database': 'Used to import data from spreadsheets into the database',
'User': 'User',
'User Details': 'User Details',
'User ID': 'User ID',
'User Management': 'User Management',
'User Profile': 'User Profile',
'User Requests': 'Yêu cầu của người dùng',
'User Updated': 'Đã cập nhật người dùng',
'User added': 'User added',
'User already has this role': 'User already has this role',
'User deleted': 'Đã xóa người dùng',
'User updated': 'User updated',
'Username': 'Username',
'Users': 'Users',
'Users removed': 'Xóa người dùng',
'Ushahidi': 'Ushahidi',
'Usual food sources in the area': 'Usual food sources in the area',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Various Reporting functionalities': 'Various Reporting functionalities',
'Vehicle': 'Vehicle',
'Vehicle Crime': 'Tai nạn giao thông',
'Vehicle Types': 'Loại phương tiện',
'Vendor': 'Vendor',
'Verified': 'Verified',
'Verified?': 'Đã xác nhận?',
'Verify password': 'Verify password',
'Version': 'Phiên bản',
'Very High': 'Very High',
'View Alerts received using either Email or SMS': 'Xem nhắc nhở gửi đến qua email hoặc sms',
'View Fullscreen Map': 'View Fullscreen Map',
'View Image': 'View Image',
'View On Map': 'Hiển thị trên bản đồ',
'View Outbox': 'View Outbox',
'View Requests for Aid': 'Xem Yêu cầu viện trợ',
'View Settings': 'View Settings',
'View Tickets': 'View Tickets',
"View and/or update details of the person's record": 'Xem và/hoặc cập nhật chi tiết mục ghi cá nhân',
'View and/or update their details': 'View and/or update their details',
'View or update the status of a hospital.': 'Xem hoặc cập nhật trạng thái của một bệnh viện',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'Hiển thị bệnh viện trên bản đồ',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": "View/Edit the Database directly (caution: doesn't respect the framework rules!)",
'Village': 'Village',
'Village Leader': 'Village Leader',
'Visible?': 'Visible?',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Đám mây tro bụi từ núi lửa',
'Volcanic Event': 'Volcanic Event',
'Volume - Fluids': 'Volume - Fluids',
'Volume - Solids': 'Volume - Solids',
'Volume Capacity': 'Volume Capacity',
'Volume/Dimensions': 'Volume/Dimensions',
'Volunteer Data': 'Dữ liệu tình nguyện viên',
'Volunteer Details': 'Volunteer Details',
'Volunteer Management': 'Volunteer Management',
'Volunteer Project': 'Dự án tình nguyện',
'Volunteer Registration': 'Đăng ký tình nguyện viên',
'Volunteer Registrations': 'Đăng ký tình nguyện viên',
'Volunteer Request': 'Yêu cầu tình nguyện viên',
'Volunteer added': 'Volunteer added',
'Volunteer deleted': 'Volunteer deleted',
'Volunteer details updated': 'Volunteer details updated',
'Volunteer registration added': 'Đã thêm đăng ký tình nguyện viên',
'Volunteer registration deleted': 'Đã xóa đăng ký tình nguyện viên',
'Volunteer registration updated': 'Đã cập nhật đăng ký tình nguyện viên',
'Volunteers': 'Tình nguyện viên',
'Volunteers were notified!': 'Volunteers were notified!',
'Vote': 'Vote',
'Votes': 'Votes',
'WASH': 'WASH',
'WMS Browser Name': 'WMS Browser Name',
'WMS Browser URL': 'WMS Browser URL',
'Walking Only': 'Walking Only',
'Walking time to the health service': 'Walking time to the health service',
'Warehouse': 'Warehouse',
'Warehouse Details': 'Warehouse Details',
'Warehouse Item Details': 'Warehouse Item Details',
'Warehouse Item added': 'Warehouse Item added',
'Warehouse Item deleted': 'Warehouse Item deleted',
'Warehouse Item updated': 'Warehouse Item updated',
'Warehouse Items': 'Warehouse Items',
'Warehouse Management': 'Quản lý kho hàng',
'Warehouse added': 'Warehouse added',
'Warehouse deleted': 'Warehouse deleted',
'Warehouse updated': 'Warehouse updated',
'Warehouse/Sites Registry': 'Warehouse/Sites Registry',
'Warehouses': 'Warehouses',
'WatSan': 'WatSan',
'Water': 'Water',
'Water Sanitation Hygiene': 'Water Sanitation Hygiene',
'Water gallon': 'Ga-lông nước',
'Water storage containers available for HH': 'Water storage containers available for HH',
'Water storage containers sufficient per HH': 'Water storage containers sufficient per HH',
'Water supply': 'Water supply',
'Waterspout': 'Waterspout',
'Way Bill(s)': 'Hóa đơn thu phí đường bộ',
'We have tried': 'We have tried',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Weekly': 'Weekly',
'Weight': 'Weight',
'Weight (kg)': 'Khối lượng',
'Welcome to the Sahana Eden Disaster Management System': 'Welcome to the Sahana Eden Disaster Management System',
'Welcome to the Sahana Portal at ': 'Welcome to the Sahana Portal at ',
'Well-Known Text': 'Well-Known Text',
'Were basic medical supplies available for health services prior to the disaster?': 'Were basic medical supplies available for health services prior to the disaster?',
'Were breast milk substitutes used prior to the disaster?': 'Were breast milk substitutes used prior to the disaster?',
'Were there cases of malnutrition in this area prior to the disaster?': 'Were there cases of malnutrition in this area prior to the disaster?',
'Were there health services functioning for the community prior to the disaster?': 'Were there health services functioning for the community prior to the disaster?',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': 'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?',
'What are the factors affecting school attendance?': 'What are the factors affecting school attendance?',
"What are the people's normal ways to obtain food in this area?": "What are the people's normal ways to obtain food in this area?",
'What are your main sources of cash to restart your business?': 'What are your main sources of cash to restart your business?',
'What are your main sources of income now?': 'What are your main sources of income now?',
'What do you spend most of your income on now?': 'What do you spend most of your income on now?',
'What food stocks exist? (main dishes)': 'What food stocks exist? (main dishes)',
'What food stocks exist? (side dishes)': 'What food stocks exist? (side dishes)',
'What is the estimated total number of people in all of these institutions?': 'What is the estimated total number of people in all of these institutions?',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?',
'What is your major source of drinking water?': 'What is your major source of drinking water?',
"What should be done to reduce women and children's vulnerability to violence?": "What should be done to reduce women and children's vulnerability to violence?",
'What type of latrines are available in the village/IDP centre/Camp?': 'What type of latrines are available in the village/IDP centre/Camp?',
'What type of salvage material can be used from destroyed houses?': 'What type of salvage material can be used from destroyed houses?',
'What type of salvage material can be used from destroyed schools?': 'What type of salvage material can be used from destroyed schools?',
'What types of health problems do children currently have?': 'What types of health problems do children currently have?',
'What types of health problems do people currently have?': 'What types of health problems do people currently have?',
'What types of health services are still functioning in the affected area?': 'What types of health services are still functioning in the affected area?',
'What types of household water storage containers are available?': 'What types of household water storage containers are available?',
'What were your main sources of income before the disaster?': 'What were your main sources of income before the disaster?',
'Wheat': 'Wheat',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.",
'Where are the alternative places for studying?': 'Where are the alternative places for studying?',
'Where are the separated children originally from?': 'Where are the separated children originally from?',
'Where do the majority of people defecate?': 'Where do the majority of people defecate?',
'Where have the children been sent?': 'Where have the children been sent?',
'Where is solid waste disposed in the village/camp?': 'Where is solid waste disposed in the village/camp?',
'Whiskers': 'Whiskers',
'Who is doing what and where': 'Who is doing what and where',
'Who usually collects water for the family?': 'Ai là người thường đi lấy nước cho cả gia đình',
'Width': 'Độ rộng',
'Wild Fire': 'Wild Fire',
'Wind Chill': 'Wind Chill',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': 'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?',
'Women of Child Bearing Age': 'Women of Child Bearing Age',
'Women participating in coping activities': 'Women participating in coping activities',
'Women who are Pregnant or in Labour': 'Women who are Pregnant or in Labour',
'Womens Focus Groups': 'Womens Focus Groups',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Working hours end': 'Hết giờ làm việc',
'Working hours start': 'Bắt đầu giờ làm việc',
'Working or other to provide money/food': 'Working or other to provide money/food',
'Would you like to display the photos on the map?': 'Would you like to display the photos on the map?',
'X-Ray': 'X-Ray',
'XMPP': 'XMPP',
'Yes': 'Yes',
'You are attempting to delete your own account - are you sure you want to proceed?': 'You are attempting to delete your own account - are you sure you want to proceed?',
'You are currently reported missing!': 'You are currently reported missing!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.',
'You can click on the map below to select the Lat/Lon fields:': 'You can click on the map below to select the Lat/Lon fields:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': 'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.',
'You can select the Draw tool (': 'You can select the Draw tool (',
'You can set the modem settings for SMS here.': 'Bạn có thể thiết lập cài đặt modem cho SMS ở đây',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'Bạn đã thiết lập các cài đặt cá nhân, vì vậy bạn không xem được các thay đổi ở đây.Để thiết lập lại, nhấp chuột vào',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.",
"You haven't made any calculations": "You haven't made any calculations",
'You must be logged in to register volunteers.': 'You must be logged in to register volunteers.',
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You must provide a series id to proceed.': 'Bạn phải nhập số id của serie để thao tác tiếp',
'You should edit Twitter settings in models/000_config.py': 'Bạn có thể chỉnh sửa cài đặt Twitter tại models/000_config.py',
'Your action is required. Please approve user %s asap: ': 'Your action is required. Please approve user %s asap: ',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Your current ordered list of solution items is shown below. You can change it by voting again.',
'Your post was added successfully.': 'Bạn đã gửi thông tin thành công',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.',
'ZIP/Postcode': 'ZIP/Postcode',
'Zinc roof': 'Zinc roof',
'Zoom': 'Zoom',
'Zoom Levels': 'Zoom Levels',
'act': 'act',
'active': 'đang hoạt động',
'added': 'added',
'all records': 'all records',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'allows for creation and management of surveys to assess the damage following a natural disaster.',
'an individual/team to do in 1-2 days': 'an individual/team to do in 1-2 days',
'approved': 'approved',
'assigned': 'đã phân công',
'average': 'trung bình',
'black': 'màu đen',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'c/o Name': 'c/o Name',
'can be used to extract data from spreadsheets and put them into database tables.': 'có thể dùng để trích xuất dữ liệu từ bẳng tính đưa vào cơ sở dữ liệu',
'cancelled': 'cancelled',
'caucasoid': 'caucasoid',
'check all': 'check all',
'click for more details': 'click for more details',
'collateral event': 'collateral event',
'completed': 'completed',
'consider': 'consider',
'constraint_id': 'constraint_id',
'criminal intent': 'criminal intent',
'crud': 'crud',
'curly': 'curly',
'currently registered': 'currently registered',
'daily': 'hàng ngày',
'dark': 'dark',
'data uploaded': 'đã upload dữ liệu',
'database': 'database',
'database %s select': 'chọn cơ sở dữ liệu %s',
'db': 'db',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'denied': 'denied',
'description': 'description',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'edit': 'edit',
'editor': 'người biên tập',
'embedded': 'embedded',
'enclosed area': 'enclosed area',
'export as csv file': 'chuyển đổi file csv',
'fat': 'fat',
'feedback': 'phản hồi',
'female': 'female',
'final report': 'final report',
'flush latrine with septic tank': 'flush latrine with septic tank',
'follow-up assessment': 'follow-up assessment',
'forehead': 'forehead',
'form data': 'form data',
'from Twitter': 'from Twitter',
'from_id': 'from_id',
'full': 'full',
'getting': 'getting',
'green': 'green',
'grey': 'grey',
'here': 'ở đây',
'high': 'high',
'hourly': 'hourly',
'households': 'households',
'human error': 'human error',
'identified': 'identified',
'ignore': 'ignore',
'immediately': 'immediately',
'in Deg Min Sec format': 'in Deg Min Sec format',
'in GPS format': 'Ở định dạng GPS',
'inactive': 'inactive',
'initial assessment': 'initial assessment',
'injured': 'injured',
'insert new': 'chèn mới',
'insert new %s': 'insert new %s',
'invalid request': 'yêu cầu không hợp lệ',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'là trung tâm thông tin trực tuyến, nơi lưu trữ thông tin về các nạn nhân và gia đình chịu ảnh hưởng của thiên tai, đặc biệt là xác định con số thương vong và lượng người sơ tán.Thông tin như tên, tuổi, số điện thoại, số CMND, nơi sơ tán và các thông tin khác cũng được lưu lại.Ảnh và dấu vân tay cũng có thể tải lên hệ thống.Để hiệu quả và tiện lợi hơn có thể quản lý theo nhóm',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'theo dõi ticket gửi đến cho phép người ta phân loại và điều phương tiện cứu trợ tới các nơi hợp lý ',
'kilogram': 'kilogram',
'kit': 'kit',
'latrines': 'latrines',
'legend URL': 'legend URL',
'light': 'light',
'liter': 'liter',
'login': 'Đăng nhập',
'long': 'long',
'long>12cm': 'long>12cm',
'low': 'low',
'male': 'male',
'manual': 'manual',
'married': 'married',
'maxExtent': 'maxExtent',
'maxResolution': 'Độ phân giải tối đa',
'medium': 'medium',
'medium<12cm': 'trung bình dưới 12cm',
'menu item': 'menu item',
'message_id': 'message_id',
'meter': 'meter',
'meter cubed': 'meter cubed',
'meters': 'meters',
'module allows the site administrator to configure various options.': 'Mô-đun cho phép người quản trị site cấu hình các tùy chọn khác nhau',
'module helps monitoring the status of hospitals.': 'module giúp theo dõi tình trạng bệnh viện',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).',
'mongoloid': 'mongoloid',
'more': 'more',
'n/a': 'n/a',
'natural hazard': 'thảm họa thiên nhiên',
'negroid': 'negroid',
'never': 'không bao giờ',
'new': 'Mới',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'no': 'no',
'none': 'none',
'normal': 'bình thường',
'not needed': 'not needed',
'not specified': 'không xác định',
'num Zoom Levels': 'num Zoom Levels',
'once': 'once',
'open defecation': 'open defecation',
'operational intent': 'operational intent',
'or import from csv file': 'or import from csv file',
'other': 'other',
'over one hour': 'hơn một tiếng',
'pack of 10': 'pack of 10',
'pending': 'pending',
'people': 'con người',
'piece': 'piece',
'pit': 'pit',
'pit latrine': 'pit latrine',
'postponed': 'postponed',
'preliminary template or draft, not actionable in its current form': 'preliminary template or draft, not actionable in its current form',
'previous 100 rows': 'previous 100 rows',
'primary incident': 'primary incident',
'problem connecting to twitter.com - please refresh': 'problem connecting to twitter.com - please refresh',
'provides a catalogue of digital media.': 'cung cấp danh mục các phương tiện truyền thông kỹ thuật số',
'record does not exist': 'record does not exist',
'record id': 'record id',
'records deleted': 'records deleted',
'red': 'red',
'reports successfully imported.': 'import báo cáo thành công',
'retired': 'retired',
'retry': 'retry',
'river': 'river',
'sack 20kg': 'sack 20kg',
'sack 50kg': 'sack 50kg',
'secondary effect': 'secondary effect',
'see comment': 'see comment',
'selected': 'selected',
'separated': 'separated',
'separated from family': 'thất lạc gia đình',
'shaved': 'shaved',
'shift_end': 'shift_end',
'shift_start': 'shift_start',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'sign-up now',
'simple': 'simple',
'single': 'single',
'slim': 'slim',
'state': 'state',
'straight': 'straight',
'suffered financial losses': 'thiệt hại về tài chính',
'table': 'table',
'table_name': 'table_name',
'tall': 'chiều cao',
'technical failure': 'technical failure',
'this': 'this',
'times and it is still not working. We give in. Sorry.': 'times and it is still not working. We give in. Sorry.',
'to access the system': 'to access the system',
'to reset your password': 'to reset your password',
'to verify your email': 'to verify your email',
'to_id': 'to_id',
'ton': 'ton',
'tonsure': 'tonsure',
'total': 'total',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!',
'unable to parse csv file': 'unable to parse csv file',
'unapproved': 'unapproved',
'uncheck all': 'uncheck all',
'unidentified': 'unidentified',
'uninhabitable = foundation and structure destroyed': 'uninhabitable = foundation and structure destroyed',
'unknown': 'unknown',
'unspecified': 'unspecified',
'updated': 'đã cập nhật',
'updates only': 'updates only',
'urgent': 'khẩn cấp',
'vm_action': 'vm_action',
'wavy': 'wavy',
'weekly': 'weekly',
'white': 'white',
'wider area, longer term, usually contain multiple Activities': 'wider area, longer term, usually contain multiple Activities',
'widowed': 'widowed',
'window': 'window',
'windows broken, cracks in walls, roof slightly damaged': 'windows broken, cracks in walls, roof slightly damaged',
'within human habitat': 'trong khu dân cư',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt module not available within the running Python - this needs installing for XLS output!',
'yes': 'có',
}
| {
"content_hash": "9b6772561d3586adea569ffaff713c1b",
"timestamp": "",
"source": "github",
"line_count": 4069,
"max_line_length": 951,
"avg_line_length": 60.73212091422954,
"alnum_prop": 0.7284263856684431,
"repo_name": "nicopresto/webSkapes",
"id": "077d8db7daa4c2ab6ee976b06ab6b2b31fc92e37",
"size": "254284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "languages/vi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "888294"
},
{
"name": "PHP",
"bytes": "15514"
},
{
"name": "Python",
"bytes": "2500005"
},
{
"name": "R",
"bytes": "148"
},
{
"name": "Shell",
"bytes": "543"
}
],
"symlink_target": ""
} |
import os
def ensuredirs(path, mode=0o777):
    """Create directory *path* and any missing parent directories.

    Unlike a bare ``os.makedirs`` call this is idempotent: the OSError
    raised when *path* already exists as a directory is swallowed, while
    any other failure (permission denied, a regular file in the way, ...)
    is re-raised.

    :param path: directory path to create.
    :param mode: permission bits for newly created directories
        (subject to the process umask). Note: ``0o777`` replaces the
        legacy ``0777`` literal, which is a syntax error on Python 3.
    """
    try:
        os.makedirs(path, mode)
    except OSError:
        # Only suppress the "already exists as a directory" case.
        if not os.path.isdir(path):
            raise
| {
"content_hash": "a0840b7fea477091c5b9251b4c6daaad",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 35,
"avg_line_length": 19.875,
"alnum_prop": 0.5786163522012578,
"repo_name": "zielmicha/satori",
"id": "1557dae36221a195b570cab6cbb6360ec984f790",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.core/satori/core/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
"""cyme.managers
- These are the managers for our models in :mod:`cyme.models`.
- They are not to be used directly, but accessed through
the ``objects`` attribute of a Model.
"""
from __future__ import absolute_import
from anyjson import serialize
from celery import current_app as celery
from djcelery.managers import ExtendedManager
from cyme.utils import cached_property, uuid
class BrokerManager(ExtendedManager):
    """Manager for the Broker model."""

    @property
    def default_url(self):
        """URL of the current Celery default broker connection."""
        return celery.broker_connection().as_uri()

    def get_default(self):
        """Return the broker record for the default URL, creating it
        if it does not exist yet."""
        broker, _created = self.get_or_create(url=self.default_url)
        return broker
class AppManager(ExtendedManager):
    """Manager for the App model."""

    def get_broker(self, url):
        """Return the broker with ``url``, creating it if missing."""
        broker, _created = self.Brokers.get_or_create(url=url)
        return broker

    def from_json(self, name=None, broker=None):
        """Deserialize app fields, resolving the broker URL to a model."""
        return {'name': name, 'broker': self.get_broker(broker)}

    def instance(self, name=None, broker=None):
        """Build an (unsaved) app model instance from deserialized fields."""
        return self.model(**self.from_json(name, broker))

    def recreate(self, name=None, broker=None, arguments=None,
                 extra_config=None):
        """Get or create an app from deserialized (JSON) fields."""
        fields = self.from_json(name, broker)
        defaults = {'broker': fields['broker'],
                    'arguments': arguments,
                    'extra_config': extra_config}
        app, _created = self.get_or_create(name=fields['name'],
                                           defaults=defaults)
        return app

    def add(self, name=None, broker=None, arguments=None, extra_config=None):
        """Get or create an app by name.

        Unlike :meth:`recreate` the broker is optional here and is only
        resolved when given.
        """
        broker = self.get_broker(broker) if broker else None
        defaults = {'broker': broker,
                    'arguments': arguments,
                    'extra_config': extra_config}
        return self.get_or_create(name=name, defaults=defaults)[0]

    def get_default(self):
        """Return the default app (named 'cyme'), creating it if missing."""
        return self.get_or_create(name='cyme')[0]

    @cached_property
    def Brokers(self):
        """Default manager of the related Broker model."""
        return self.model.Broker._default_manager
class InstanceManager(ExtendedManager):
    """Manager for the Instance model."""

    def enabled(self):
        """Queryset of all enabled instances."""
        return self.filter(is_enabled=True)

    def _maybe_queues(self, queues):
        """Normalize ``queues`` into a list of queue-name strings.

        Accepts a comma-separated string, or an iterable that may mix
        plain names with Queue model instances.
        """
        if isinstance(queues, basestring):
            queues = queues.split(',')
        names = []
        for queue in queues:
            if isinstance(queue, self.model.Queue):
                names.append(queue.name)
            else:
                names.append(queue)
        return names

    def add(self, name=None, queues=None, max_concurrency=1,
            min_concurrency=1, broker=None, pool=None, app=None,
            arguments=None, extra_config=None):
        """Create and return a new instance.

        A uuid is generated for the name when none is given; queues and
        broker are attached afterwards since both require normalization.
        """
        instance = self.create(name=name or uuid(),
                               max_concurrency=max_concurrency,
                               min_concurrency=min_concurrency,
                               pool=pool,
                               app=app,
                               arguments=arguments,
                               extra_config=extra_config)
        dirty = False
        if queues:
            instance.queues = self._maybe_queues(queues)
            dirty = True
        if broker:
            instance._broker = broker
            dirty = True
        if dirty:
            instance.save()
        return instance

    def _action(self, name, action, *args, **kwargs):
        """Look up the instance ``name``, call ``action`` on it and
        return the instance."""
        instance = self.get(name=name)
        getattr(instance, action)(*args, **kwargs)
        return instance

    def remove(self, name):
        """Delete the instance with the given name."""
        return self._action(name, 'delete')

    def enable(self, name):
        """Enable the instance with the given name."""
        return self._action(name, 'enable')

    def disable(self, name):
        """Disable the instance with the given name."""
        return self._action(name, 'disable')

    def remove_queue_from_instances(self, queue, **query):
        """Remove ``queue`` from every matching instance that consumes
        from it; return the list of instances modified."""
        modified = []
        for instance in self.filter(**query).iterator():
            if queue in instance.queues:
                instance.queues.remove(queue)
                instance.save()
                modified.append(instance)
        return modified

    def add_queue_to_instances(self, queue, **query):
        """Add ``queue`` to every matching instance; return the list of
        instances modified."""
        modified = []
        for instance in self.filter(**query).iterator():
            instance.queues.add(queue)
            instance.save()
            modified.append(instance)
        return modified
class QueueManager(ExtendedManager):
    """Manager for the Queue model."""

    def enabled(self):
        """Queryset of all enabled queues."""
        return self.filter(is_enabled=True)

    def _add(self, name, **declaration):
        """Get or create queue ``name`` using ``declaration`` as the
        creation defaults."""
        queue, _created = self.get_or_create(name=name, defaults=declaration)
        return queue

    def add(self, name, exchange=None, exchange_type=None,
            routing_key=None, **options):
        """Declare a queue; any extra options are stored JSON-serialized."""
        if options:
            options = serialize(options)
        else:
            options = None
        return self._add(name, exchange=exchange, exchange_type=exchange_type,
                         routing_key=routing_key, options=options)
| {
"content_hash": "e5f02d5b8c8caae59c658a2af7ce405f",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 32.52142857142857,
"alnum_prop": 0.5873050735778608,
"repo_name": "celery/cyme",
"id": "a53dbba3e8c3abdf7ee0722c389b9883e16d75ca",
"size": "4553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyme/models/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "162774"
},
{
"name": "Shell",
"bytes": "2360"
}
],
"symlink_target": ""
} |
import tfrrs,bs4,requests,cli
| {
"content_hash": "1f7a017bb54429ecfc058cdd9b067f51",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.8333333333333334,
"repo_name": "ethanwh/tfrrs",
"id": "ffb7bda4dd7dd2692a015a038f9dc00bbefd29dd",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfrrs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5062"
}
],
"symlink_target": ""
} |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# VTK regression test: splits a point-data vector field into scalar
# components, contours each component, re-merges them in a different
# component order, and traces a stream ribbon through the merged field.
# Create the RenderWindow, Renderer and an interactor.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Create the pipeline: read the PLOT3D combustor example dataset.
# NOTE(review): the function numbers follow the PLOT3D reader
# convention (100 selects a scalar quantity, 202 a vector quantity) -
# confirm the exact physical quantities against the
# vtkMultiBlockPLOT3DReader documentation.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# Split the active point-data vector array into three single-component
# arrays named vx, vy and vz.
sf = vtk.vtkSplitField()
sf.SetInputData(output)
sf.SetInputField("VECTORS","POINT_DATA")
sf.Split(0,"vx")
sf.Split(1,"vy")
sf.Split(2,"vz")
#sf.Print()
# Isosurface of the vx component: promote "vx" to active scalars,
# contour at 0.38, shade with 45-degree feature-angle normals.
aax = vtk.vtkAssignAttribute()
aax.SetInputConnection(sf.GetOutputPort())
aax.Assign("vx","SCALARS","POINT_DATA")
isoVx = vtk.vtkContourFilter()
isoVx.SetInputConnection(aax.GetOutputPort())
isoVx.SetValue(0,.38)
normalsVx = vtk.vtkPolyDataNormals()
normalsVx.SetInputConnection(isoVx.GetOutputPort())
normalsVx.SetFeatureAngle(45)
isoVxMapper = vtk.vtkPolyDataMapper()
isoVxMapper.SetInputConnection(normalsVx.GetOutputPort())
isoVxMapper.ScalarVisibilityOff()
isoVxMapper.ImmediateModeRenderingOn()
isoVxActor = vtk.vtkActor()
isoVxActor.SetMapper(isoVxMapper)
isoVxActor.GetProperty().SetColor(1,0.7,0.6)
# Same isosurface pipeline for the vy component.
aay = vtk.vtkAssignAttribute()
aay.SetInputConnection(sf.GetOutputPort())
aay.Assign("vy","SCALARS","POINT_DATA")
isoVy = vtk.vtkContourFilter()
isoVy.SetInputConnection(aay.GetOutputPort())
isoVy.SetValue(0,.38)
normalsVy = vtk.vtkPolyDataNormals()
normalsVy.SetInputConnection(isoVy.GetOutputPort())
normalsVy.SetFeatureAngle(45)
isoVyMapper = vtk.vtkPolyDataMapper()
isoVyMapper.SetInputConnection(normalsVy.GetOutputPort())
isoVyMapper.ScalarVisibilityOff()
isoVyMapper.ImmediateModeRenderingOn()
isoVyActor = vtk.vtkActor()
isoVyActor.SetMapper(isoVyMapper)
isoVyActor.GetProperty().SetColor(0.7,1,0.6)
# Same isosurface pipeline for the vz component.
aaz = vtk.vtkAssignAttribute()
aaz.SetInputConnection(sf.GetOutputPort())
aaz.Assign("vz","SCALARS","POINT_DATA")
isoVz = vtk.vtkContourFilter()
isoVz.SetInputConnection(aaz.GetOutputPort())
isoVz.SetValue(0,.38)
normalsVz = vtk.vtkPolyDataNormals()
normalsVz.SetInputConnection(isoVz.GetOutputPort())
normalsVz.SetFeatureAngle(45)
isoVzMapper = vtk.vtkPolyDataMapper()
isoVzMapper.SetInputConnection(normalsVz.GetOutputPort())
isoVzMapper.ScalarVisibilityOff()
isoVzMapper.ImmediateModeRenderingOn()
isoVzActor = vtk.vtkActor()
isoVzActor.SetMapper(isoVzMapper)
isoVzActor.GetProperty().SetColor(0.4,0.5,1)
# Re-merge the split components into a 3-component array "merged",
# deliberately permuting the components (vy, vz, vx) to exercise the
# merge-field component mapping.
mf = vtk.vtkMergeFields()
mf.SetInputConnection(sf.GetOutputPort())
mf.SetOutputField("merged","POINT_DATA")
mf.SetNumberOfComponents(3)
mf.Merge(0,"vy",0)
mf.Merge(1,"vz",0)
mf.Merge(2,"vx",0)
#mf.Print()
# Promote "merged" to active scalars, then re-type it as the active
# vectors so the stream tracer can integrate through it.
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(mf.GetOutputPort())
aa.Assign("merged","SCALARS","POINT_DATA")
aa2 = vtk.vtkAssignAttribute()
aa2.SetInputConnection(aa.GetOutputPort())
aa2.Assign("SCALARS","VECTORS","POINT_DATA")
# Trace a single forward streamline from a fixed seed point and
# display it as a ribbon.
sl = vtk.vtkStreamTracer()
sl.SetInputConnection(aa2.GetOutputPort())
sl.SetStartPosition(2,-2,26)
sl.SetMaximumPropagation(40)
sl.SetInitialIntegrationStep(0.2)
sl.SetIntegrationDirectionToForward()
rf = vtk.vtkRibbonFilter()
rf.SetInputConnection(sl.GetOutputPort())
rf.SetWidth(1.0)
rf.SetWidthFactor(5)
slMapper = vtk.vtkPolyDataMapper()
slMapper.SetInputConnection(rf.GetOutputPort())
slMapper.ImmediateModeRenderingOn()
slActor = vtk.vtkActor()
slActor.SetMapper(slMapper)
# Outline of the structured grid for spatial reference.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Add the actors to the renderer and set background/size; the actors
# are offset vertically (y) so the three isosurfaces and the stream
# ribbon do not overlap in the rendered image.
ren1.AddActor(isoVxActor)
isoVxActor.AddPosition(0,12,0)
ren1.AddActor(isoVyActor)
ren1.AddActor(isoVzActor)
isoVzActor.AddPosition(0,-12,0)
ren1.AddActor(slActor)
slActor.AddPosition(0,24,0)
ren1.AddActor(outlineActor)
outlineActor.AddPosition(0,24,0)
ren1.SetBackground(.8,.8,.8)
renWin.SetSize(321,321)
# Fixed camera so the regression image is reproducible.
ren1.GetActiveCamera().SetPosition(-20.3093,20.55444,64.3922)
ren1.GetActiveCamera().SetFocalPoint(8.255,0.0499763,29.7631)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0,0,1)
ren1.GetActiveCamera().Dolly(0.4)
ren1.ResetCameraClippingRange()
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| {
"content_hash": "828413c7a5e6fcd4baaba20dc5c9e5e7",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 66,
"avg_line_length": 33.177304964539005,
"alnum_prop": 0.7736212056434374,
"repo_name": "hlzz/dotfiles",
"id": "4857135157055709794301ac52c3d40ac3c88e76",
"size": "4701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/Filters/General/Testing/Python/splitVectors.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
# Minimal setuptools packaging stub: registers a distribution named "x"
# with no packages, modules or metadata beyond the name.
from setuptools import setup
setup(name="x")
| {
"content_hash": "bfc25db848a6582a019d3ce9db7fadd7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 28,
"avg_line_length": 22.5,
"alnum_prop": 0.7777777777777778,
"repo_name": "ccwang002/2014-Taipeipy-venv",
"id": "f8ea3023540effcb9bed3b64f87a87086e9c67ae",
"size": "45",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "play_tox/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
import web
from libraries.utils import hash_utils
from localsys.storage import db
import localsys
from localsys import storage
import re
import base64
class users_model:
    """Data-access layer for the ``users`` and ``password_recovery`` tables.

    All methods are classmethods that query through the shared web.py ``db``
    handle.  NOTE(review): the ``&&`` operator used in where-clauses below is
    MySQL-specific shorthand for AND - confirm the deployment database.
    """

    @classmethod
    def authorize(cls):
        """
        Returns user_id if request is authorized, else 0.
        Authorizes by checking the following in order:
            1. HTTP Authorization header
            2. Session data
        """
        if web.ctx.env.get('HTTP_AUTHORIZATION') is None:
            # Do not use context cache - will cause infinite recursion.
            return storage.session.user_id
        else:
            # Strip the "Basic " prefix and decode the "username:password"
            # pair.  NOTE(review): base64.decodestring is a deprecated alias
            # (removed in Python 3.9); decodebytes is the modern spelling.
            auth = re.sub('^Basic ','',web.ctx.env.get('HTTP_AUTHORIZATION'))
            username, password = base64.decodestring(auth).split(':')
            return users_model.check_credentials(username, password)

    @classmethod
    def session_login(cls, user_id):
        """
        Sets session user_id to parameter.
        """
        localsys.storage.session.user_id = user_id
        # Can't guarantee this method will be used before cache is initialized.
        localsys.environment.context.flush_cache()
        return user_id

    @classmethod
    def get_username(cls, user_id):
        """
        Returns username of user given user_id, empty string otherwise.
        """
        users = db.select('users', where="user_id=$user_id", vars=locals())
        if len(users) == 1:
            return users[0].username
        else:
            return ''

    @classmethod
    def get_user_id(cls, username):
        """
        Returns user_id given username, 0 otherwise.
        """
        users = db.select('users', where="username=$username", vars=locals())
        if len(users) == 1:
            return users[0].user_id
        else:
            return 0

    @classmethod
    def check_credentials(cls, username, password):
        """
        Returns ID of user if credentials match, 0 otherwise.
        """
        # Compare against the stored hash, never the plaintext password.
        password = hash_utils.hash_password(password)
        auth = db.select('users', where="username=$username&&password=$password", vars=locals())
        if len(auth) == 1:
            return auth[0].user_id
        else:
            return 0

    @classmethod
    def select_users(cls, user_id=0, username=''):
        """
        Returns list of all users with 'username' and 'user_id' (optional) parameters.
        """
        if user_id > 0:
            if username != '':
                return db.select('users', where="username=$username&&user_id=$user_id", vars=locals())
            else:
                return db.select('users', where="user_id=$user_id", vars=locals())
        else:
            return db.select('users', where="username=$username", vars=locals())

    @classmethod
    def register(cls, username, password, email):
        """
        Attempts to insert new user data into users table.
        Returns ID of user if successfully registered, 0 if user already exists, -1 if database error.
        """
        if len(cls.select_users(username=username)) > 0:
            return 0
        if username == '' or email == '':
            return -1
        db.insert('users', username=username, email=email,
                  password=hash_utils.hash_password(password))
        # Re-select to obtain the auto-generated primary key.
        user_lookup = cls.select_users(username=username)
        if len(user_lookup) == 1:
            return user_lookup[0].user_id
        return -1

    @classmethod
    def update_password(cls, user_id, password):
        """
        Updates password according to specified user_id and new password.
        Returns true if updated for one user or password unchanged, false otherwise.
        """
        user_list = cls.select_users(user_id=user_id)
        password_hash = hash_utils.hash_password(password)
        # Short-circuit: the new password hashes to the stored value already.
        if len(user_list) == 1 and password_hash == user_list[0].password:
            return True
        return db.update('users', where="user_id=$user_id",
                         password=password_hash, vars=locals()) == 1

    @classmethod
    def request_password(cls, token, user_id):
        """
        Creates password recovery ticket in password_recovery table.
        Returns recipient email address if user found, else empty string
        """
        user_list = cls.select_users(user_id=user_id)
        if len(user_list) == 1:
            user = user_list[0]
            db.insert('password_recovery', user_id=user.user_id,
                      datetime=web.SQLLiteral('NOW()'), token=token, invalid=0)
            return user.email
        else:
            return ''

    @classmethod
    def password_recovery_user(cls, token=''):
        """
        Return user_id if password request ticket is valid. 0 otherwise.
        :param token:
        """
        user_list = db.select('password_recovery', where="token=$token&&invalid=0", vars=locals())
        if len(user_list) == 1:
            return user_list[0].user_id
        else:
            return 0

    @classmethod
    def update_recovery_status(cls, token, invalid=1):
        """
        Updates password recovery ticket, assuming successful recovery.
        Returns true if one row affected, else false.
        """
        # Bug fix: tickets are created in and read from 'password_recovery'
        # (see request_password / password_recovery_user); this update used to
        # target a non-existent 'pwrecovery' table, so tickets could never be
        # invalidated.
        return db.update('password_recovery', where="token=$token",
                         invalid=invalid, vars=locals()) == 1
| {
"content_hash": "83abb59d9eb8374b26c66a7e2f5624ea",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 122,
"avg_line_length": 33.79245283018868,
"alnum_prop": 0.5801228364042434,
"repo_name": "mapto/sprks",
"id": "ef89f3e01a810b4e1535f1466fe6dc6ae056b650",
"size": "5373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15370"
},
{
"name": "Dockerfile",
"bytes": "327"
},
{
"name": "HTML",
"bytes": "35629"
},
{
"name": "JavaScript",
"bytes": "75183"
},
{
"name": "Python",
"bytes": "124271"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import *
from toontown.distributed.ToontownMsgTypes import *
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.minigame import Purchase
from otp.avatar import DistributedAvatar
import SkyUtil
from direct.task.Task import Task
import Hood
from toontown.estate import EstateLoader
from toontown.estate import HouseGlobals
import ZoneUtil
class EstateHood(Hood.Hood):
    """Hood subclass for player estates.

    Drives the safe-zone/quiet-zone FSM, estate-specific sky handling, and
    the pop-up shown when the estate owner leaves and visitors are kicked
    back to the playground.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('EstateHood')

    def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):
        Hood.Hood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)
        self.fsm = ClassicFSM.ClassicFSM('Hood', [State.State('start', self.enterStart, self.exitStart, ['safeZoneLoader']),
         State.State('safeZoneLoader', self.enterSafeZoneLoader, self.exitSafeZoneLoader, ['quietZone']),
         State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['safeZoneLoader']),
         State.State('final', self.enterFinal, self.exitFinal, [])], 'start', 'final')
        self.fsm.enterInitialState()
        self.id = MyEstate
        self.safeZoneLoaderClass = EstateLoader.EstateLoader
        self.storageDNAFile = 'phase_5.5/dna/storage_estate.pdna'
        self.holidayStorageDNADict = {WINTER_DECORATIONS: ['phase_5.5/dna/winter_storage_estate.pdna'],
         WACKY_WINTER_DECORATIONS: ['phase_5.5/dna/winter_storage_estate.pdna'],
         HALLOWEEN_PROPS: ['phase_5.5/dna/halloween_props_storage_estate.pdna'],
         SPOOKY_PROPS: ['phase_5.5/dna/halloween_props_storage_estate.pdna']}
        self.skyFile = 'phase_3.5/models/props/TT_sky'
        self.spookySkyFile = 'phase_3.5/models/props/BR_sky'
        self.popupInfo = None
        return

    def load(self):
        Hood.Hood.load(self)

    def unload(self):
        # Tear down the kickout popup if it is still around, then defer to
        # the base class for the rest of the cleanup.
        del self.safeZoneLoaderClass
        if self.popupInfo:
            self.popupInfo.destroy()
            self.popupInfo = None
        Hood.Hood.unload(self)
        return

    def enter(self, requestStatus):
        # Cleanup: the previous version also read requestStatus['hoodId'] and
        # ['zoneId'] into unused locals.
        self.accept('kickToPlayground', self.kickToPlayground)
        self.fsm.request(requestStatus['loader'], [requestStatus])

    def exit(self):
        if self.loader:
            self.loader.exit()
            self.loader.unload()
            del self.loader
        Hood.Hood.exit(self)

    def loadLoader(self, requestStatus):
        loaderName = requestStatus['loader']
        if loaderName == 'safeZoneLoader':
            self.loader = self.safeZoneLoaderClass(self, self.fsm.getStateNamed('safeZoneLoader'), self.loaderDoneEvent)
            self.loader.load()

    def spawnTitleText(self, zoneId):
        # Estates show no zone title text.
        pass

    def hideTitleTextTask(self, task):
        return Task.done

    def kickToPlayground(self, retCode):
        """Handle being booted from an estate.

        retCode 0: owner left - show the grace-period warning popup.
        retCode 1 or 2: send the toon back to its last hood.
        """
        if retCode == 0:
            msg = TTLocalizer.EstateOwnerLeftMessage % HouseGlobals.BOOT_GRACE_PERIOD
            self.__popupKickoutMessage(msg)
        elif retCode in (1, 2):
            # Merged: codes 1 and 2 previously had two byte-identical
            # branches; both simply teleport back to the last hood.
            zoneId = base.localAvatar.lastHood
            self.doneStatus = {'loader': ZoneUtil.getBranchLoaderName(zoneId),
             'where': ZoneUtil.getToonWhereName(zoneId),
             'how': 'teleportIn',
             'hoodId': zoneId,
             'zoneId': zoneId,
             'shardId': None,
             'avId': -1}
            messenger.send(self.doneEvent)
        else:
            self.notify.error('unknown reason for exiting estate')
        return

    def __popupKickoutMessage(self, msg):
        # Replace any popup already on screen before building a new one.
        if self.popupInfo != None:
            self.popupInfo.destroy()
            self.popupInfo = None
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
        self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=msg, frameSize=(-1, 1, -1, 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=GlobalDialogColor, geom_scale=(0.88, 1, 0.75), geom_pos=(0, 0, -.08), text_scale=TTLocalizer.EHpopupInfo, text_pos=(0, 0.1))
        DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.EstatePopupOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.0, 0.0, -0.3), command=self.__handleKickoutOk)
        buttons.removeNode()
        self.popupInfo.reparentTo(aspect2d)
        return

    def __handleKickoutOk(self):
        self.popupInfo.reparentTo(hidden)

    def skyTrack(self, task):
        return SkyUtil.cloudSkyTrack(task)

    def startSky(self):
        # Switch back from the spooky sky first if it is active.
        if not self.sky.getTag('sky') == 'Regular':
            self.endSpookySky()
        SkyUtil.startCloudSky(self)
        if base.cloudPlatformsEnabled:
            self.loader.startCloudPlatforms()

    def stopSky(self):
        Hood.Hood.stopSky(self)
        self.loader.stopCloudPlatforms()

    def startSpookySky(self):
        # Stop the regular cloud sky if its track is running, then fade in
        # the Halloween sky attached to the camera.
        if hasattr(self, 'loader') and self.loader and hasattr(self.loader, 'cloudTrack') and self.loader.cloudTrack:
            self.stopSky()
        self.sky = loader.loadModel(self.spookySkyFile)
        self.sky.setTag('sky', 'Halloween')
        self.sky.setScale(1.0)
        self.sky.setDepthTest(0)
        self.sky.setDepthWrite(0)
        self.sky.setColor(0.5, 0.5, 0.5, 1)
        self.sky.setBin('background', 100)
        self.sky.setFogOff()
        self.sky.reparentTo(camera)
        self.sky.setTransparency(TransparencyAttrib.MDual, 1)
        fadeIn = self.sky.colorScaleInterval(1.5, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0.25), blendType='easeInOut')
        fadeIn.start()
        self.sky.setZ(0.0)
        self.sky.setHpr(0.0, 0.0, 0.0)
        # Keep the sky from rotating/translating with the camera parent.
        ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ)
        self.sky.node().setEffect(ce)
| {
"content_hash": "fb7bdfa819238ff892c8a606fbbe4752",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 303,
"avg_line_length": 42.72185430463576,
"alnum_prop": 0.647651526895055,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "4095b013d760f086e72468d6b8e8bdc60d2dd2f0",
"size": "6451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/hood/EstateHood.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from png import Reader
from sm.engine.png_generator import PngGenerator
def test_png_gen_greyscale_works():
    """Greyscale PNG output round-trips to the normalised input intensities."""
    opaque_alpha = np.array([[1, 1, 1]])
    generator = PngGenerator(opaque_alpha, greyscale=True)

    intensities = np.array([[0., 5., 10.]])
    expected = (intensities - intensities.min()) / (intensities.max() - intensities.min())

    png_fp = generator.generate_png(intensities)
    width, height, pixels, _ = Reader(file=png_fp).asFloat()

    assert_equal(width, 3)
    assert_equal(height, 1)
    # Greyscale+alpha encoding -> two channels per pixel; channel 0 is luminance.
    grey_alpha_shape = intensities.shape + (2,)
    decoded = np.array(list(pixels)).reshape(grey_alpha_shape)[:, :, 0]
    assert_almost_equal(decoded, expected, decimal=4)
| {
"content_hash": "a4907e685d7713359fdcff9df5ae5554",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 100,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.6587078651685393,
"repo_name": "SpatialMetabolomics/SM_distributed",
"id": "037d974de56a55b9198c4293ea07142b85993373",
"size": "712",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sm/engine/tests/test_png_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1841"
},
{
"name": "HTML",
"bytes": "13584"
},
{
"name": "JavaScript",
"bytes": "33219"
},
{
"name": "Python",
"bytes": "189667"
},
{
"name": "Shell",
"bytes": "3940"
}
],
"symlink_target": ""
} |
import logging
import classad
from adstash.ad_sources.generic import GenericAdSource
from adstash.convert import to_json, unique_doc_id
class FileAdSource(GenericAdSource):
    """Ad source that reads HTCondor ClassAds from a plain text file."""

    def __init__(self, checkpoint_file=None, **kwargs):
        # checkpoint_file is accepted for interface compatibility with other
        # ad sources but is unused when reading a static file.
        pass

    def fetch_ads(self, ad_file):
        """
        Generates one ClassAd at a time from ad_file.
        Necessary because classad.parseAds()
        cannot handle files with "weird" ad separators
        (e.g. "**** metadataA=foo metadata2=bar")
        """
        try:
            with open(ad_file) as f:
                ad_string = ""
                for line in f:
                    if line.startswith("***") or line.strip() == "":
                        if ad_string == "":
                            continue
                        yield classad.parseOne(ad_string)
                        ad_string = ""
                        # Bug fix: the separator line itself was previously
                        # appended to the start of the next ad, which would
                        # make classad.parseOne fail on it.
                        continue
                    ad_string += line
                # Bug fix: emit the trailing ad when the file does not end
                # with a separator line (it used to be silently dropped).
                if ad_string.strip() != "":
                    yield classad.parseOne(ad_string)
        except IOError as e:
            # Bug fix: this log line referenced an undefined name "adfile".
            logging.error(f"Could not read {ad_file}: {str(e)}")
            return
        except Exception as e:
            # Bug fix: "e" was never bound (bare "except Exception:") and
            # "adfile" was undefined, so this handler itself raised.
            logging.exception(f"Error while reading {ad_file} ({str(e)}), displaying traceback.")
            return

    def process_ads(self, interface, ads, metadata={}, chunk_size=0, **kwargs):
        """Convert ads to JSON docs and post them to interface in chunks.

        Yields once per posted chunk so callers can drive/observe progress.
        NOTE(review): the mutable default for ``metadata`` is kept for
        interface compatibility; it is never mutated here.
        """
        chunk = []
        for ad in ads:
            try:
                dict_ad = to_json(ad, return_dict=True)
            except Exception as e:
                # Bug fix: the old message referenced undefined names
                # ("schedd_ad", "traceback"); logging.exception records the
                # traceback for us.
                logging.exception(f"Failure when converting document: {str(e)}")
                continue
            chunk.append((unique_doc_id(dict_ad), dict_ad,))
            if (chunk_size > 0) and (len(chunk) >= chunk_size):
                interface.post_ads(chunk, metadata=metadata, **kwargs)
                yield
                chunk = []
        if len(chunk) > 0:
            interface.post_ads(chunk, metadata=metadata, **kwargs)
            yield
| {
"content_hash": "43f7dd59593b7ac2fa17619add8fe5f2",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 102,
"avg_line_length": 34.59322033898305,
"alnum_prop": 0.506124448799608,
"repo_name": "htcondor/htcondor",
"id": "1d52fd8393bd086a57e7a6d772d90cd93888bd67",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/condor_scripts/adstash/ad_sources/ad_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "71055"
},
{
"name": "Awk",
"bytes": "9454"
},
{
"name": "Batchfile",
"bytes": "146264"
},
{
"name": "C",
"bytes": "1651049"
},
{
"name": "C++",
"bytes": "31790435"
},
{
"name": "CMake",
"bytes": "468527"
},
{
"name": "CSS",
"bytes": "9738"
},
{
"name": "Dockerfile",
"bytes": "75955"
},
{
"name": "Fortran",
"bytes": "1279"
},
{
"name": "HTML",
"bytes": "59724"
},
{
"name": "Java",
"bytes": "43977"
},
{
"name": "JavaScript",
"bytes": "130293"
},
{
"name": "M4",
"bytes": "20440"
},
{
"name": "Makefile",
"bytes": "68811"
},
{
"name": "Perl",
"bytes": "3761627"
},
{
"name": "PowerShell",
"bytes": "5412"
},
{
"name": "Python",
"bytes": "1593654"
},
{
"name": "Roff",
"bytes": "2353"
},
{
"name": "Shell",
"bytes": "579393"
},
{
"name": "VBScript",
"bytes": "8734"
},
{
"name": "Yacc",
"bytes": "13532"
}
],
"symlink_target": ""
} |
import os
import unittest
import mock
from unittest import TestCase
from dropbot.bot import DropBot
class DropBotTestCase(TestCase):
    """Behavioural tests for DropBot's chat commands.

    Tests that need network access are skipped when NO_NETWORK=1 is set.
    """

    def setUp(self):
        self.bot = DropBot('test@test.com', 'testpassword')

    def call_command(self, command, args=None):
        """Fakes a call to a bot command"""
        # Bug fix (idiom): avoid a mutable default argument for ``args``.
        if args is None:
            args = []
        msg = {'type': 'groupchat'}
        return self.bot.call_command(command, args, msg)

    def _assert_string_response(self, res):
        # Shared shape check: commands reply with a tuple whose first element
        # is a human-readable string.
        self.assertIsInstance(res, tuple)
        self.assertIsInstance(res[0], basestring)

    def test_simple_bot(self):
        self.assertIsNotNone(self.bot)

    def test_system_picker(self):
        self.assertEquals(self.bot._system_picker('Jita'), 30000142)
        self.assertEquals(self.bot._system_picker('Jit'), 30000142)
        self.assertIs(type(self.bot._system_picker('J')), str)
        self.assertEqual(self.bot._system_picker('J'), 'More than 10 systems match J, please provide a more complete name')
        self.assertEqual(self.bot._system_picker('GE-'), 'Did you mean: GE-94X, GE-8JV?')
        self.assertEqual(self.bot._system_picker('asdasd'), 'No systems found matching asdasd')

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_get_evecentral_price(self):
        self.assertIs(self.bot._get_evecentral_price(1, 1), None)
        self.assertIs(type(self.bot._get_evecentral_price(22430, 30000142)), tuple)

    def test_cmd_help(self):
        self._assert_string_response(self.call_command('help'))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_bestprice(self):
        self._assert_string_response(self.call_command('bestprice', ['rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_price(self):
        self._assert_string_response(self.call_command('price', args=['jita', 'rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_jita(self):
        self._assert_string_response(self.call_command('jita', ['rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_amarr(self):
        self._assert_string_response(self.call_command('amarr', ['rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_rens(self):
        self._assert_string_response(self.call_command('rens', ['rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_dodixie(self):
        self._assert_string_response(self.call_command('dodixie', ['rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_uh(self):
        self._assert_string_response(self.call_command('uh', ['rifter']))

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_hek(self):
        self._assert_string_response(self.call_command('hek', ['rifter']))

    def test_cmd_r(self):
        pass

    def test_cmd_redditimg(self):
        pass

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_kos(self):
        self._assert_string_response(self.call_command('kos', ['Palkark']))

    def test_cmd_range(self):
        self._assert_string_response(self.call_command('range', ['U-HVIX']))

    def test_cmd_route(self):
        self._assert_string_response(self.call_command('route', ['Jita', 'Amarr']))

    def test_cmd_addjb(self):
        res = self.call_command('addjb', ['Jita', 'Amarr'])
        self._assert_string_response(res)
        self.assertEqual(res[0], 'Done')

    def test_cmd_listjbs(self):
        # With no jumpbridges stored the reply payload is None.
        res = self.call_command('listjbs')
        self.assertIsInstance(res, tuple)
        self.assertIsNone(res[0], None)
        # After adding one, a textual listing is returned.
        self.call_command('addjb', ['Jita', 'Amarr'])
        self._assert_string_response(self.call_command('listjbs'))

    def test_cmd_mapstats(self):
        self._assert_string_response(self.call_command('mapstats'))

    def test_cmd_hit(self):
        pass

    def test_cmd_jump(self):
        pass

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_id(self):
        pass

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_kill(self):
        pass

    def test_cmd_mute(self):
        self.assertEqual(self.bot.kills_muted, False)
        res = self.call_command('mute')
        self._assert_string_response(res)
        self.assertEqual(res[0], 'Killmails muted, posting will resume automatically in 30 minutes')
        self.assertEqual(self.bot.kills_muted, True)

    @unittest.skipIf(os.environ.get('NO_NETWORK', '0') == '1', 'No networking, skipping test')
    def test_cmd_nearestoffice(self):
        pass

    def test_cmd_rageping(self):
        pass

    def test_jackdaw(self):
        """
        The items in the Carnyx release can be found.
        """
        self.assertEqual(self.bot._item_picker("Jackdaw"), (u'34828', u'Jackdaw'))

    def test_carnyx_plex(self):
        self.assertEqual(self.bot._item_picker("plex"), (u"29668", "30 Day Pilot's License Extension (PLEX)"))
"content_hash": "b8642df2050d3df505b2ae2789190e86",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 123,
"avg_line_length": 38.30120481927711,
"alnum_prop": 0.6322743000943692,
"repo_name": "nikdoof/dropbot",
"id": "1905043a9b6e6111e35b25f99323b9a761b96d0e",
"size": "6358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68337"
},
{
"name": "Shell",
"bytes": "761"
}
],
"symlink_target": ""
} |
import datetime
import functools
import json
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
import olympia.core.logger
from . import models as context
task_log = olympia.core.logger.getLogger('z.task')
def login_required(f=None, redirect=True):
    """
    Like Django's login_required, but with to= instead of next=.

    If redirect=False then we return 401 instead of redirecting to the
    login page. That's nice for ajax views.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(request, *args, **kw):
            # Prevent circular ref in accounts.utils
            from olympia.accounts.utils import redirect_for_login

            if request.user.is_authenticated:
                return func(request, *args, **kw)
            if redirect:
                return redirect_for_login(request)
            return http.HttpResponse(status=401)

        return wrapper

    # Support use both with and without call parentheses.
    return decorator(f) if f else decorator
def post_required(f):
    """Reject any request whose method is not POST with a 405 response."""
    @functools.wraps(f)
    def wrapper(request, *args, **kw):
        if request.method == 'POST':
            return f(request, *args, **kw)
        return http.HttpResponseNotAllowed(['POST'])

    return wrapper
def permission_required(permission):
    """Decorator factory: require a logged-in user holding ``permission``,
    raising PermissionDenied otherwise."""
    def decorator(f):
        @functools.wraps(f)
        @login_required
        def wrapper(request, *args, **kw):
            from olympia.access import acl

            if not acl.action_allowed(request, permission):
                raise PermissionDenied
            return f(request, *args, **kw)

        return wrapper

    return decorator
def json_response(response, has_trans=False, status_code=200):
    """
    Return a response as JSON. If you are just wrapping a view,
    then use the json_view decorator.
    """
    # to avoid circular imports with users.models
    from .utils import AMOJSONEncoder

    # AMOJSONEncoder knows how to serialize translated fields.
    encoder_kwargs = {'cls': AMOJSONEncoder} if has_trans else {}
    body = json.dumps(response, **encoder_kwargs)
    return http.HttpResponse(
        body, content_type='application/json', status=status_code
    )
def json_view(f=None, has_trans=False, status_code=200):
    """Decorator that serializes a view's return value to a JSON response.

    Return values that are already HttpResponse instances pass through
    untouched.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            result = func(*args, **kw)
            if isinstance(result, http.HttpResponse):
                return result
            return json_response(
                result, has_trans=has_trans, status_code=status_code
            )

        return wrapper

    # Support use both with and without call parentheses.
    return decorator(f) if f else decorator
# Shortcut for returning a 400 Bad Request whose body is the JSON-encoded
# error payload ``s`` (same content type as json_response).
json_view.error = lambda s: http.HttpResponseBadRequest(
    json.dumps(s), content_type='application/json'
)
def use_primary_db(f):
    """Execute the wrapped callable with queries pinned to the primary DB."""
    @functools.wraps(f)
    def pinned(*args, **kw):
        with context.use_primary_db():
            return f(*args, **kw)

    return pinned
def set_modified_on(f):
    """
    Will update the modified timestamp on the objects provided through
    the `set_modified_on` keyword argument, a short time after the wrapped
    function exits successfully (returns a truthy value).

    If that function returns a dict, it will also use that dict as additional
    keyword arguments to update on the provided objects.
    """
    # Imported here to avoid a circular import at module load time.
    from olympia.amo.tasks import set_modified_on_object

    @functools.wraps(f)
    def wrapper(*args, **kw):
        # The marker kwarg is popped so the wrapped function never sees it.
        obj_info = kw.pop('set_modified_on', None)
        # obj_info is a tuple in the form of (app_label, model_name, pk)
        result = f(*args, **kw)
        # Only schedule the update when the wrapped call succeeded (truthy).
        if obj_info and result:
            # If the function returned a dict, pass that dict down as
            # kwargs to the set_modified_on_object task. Useful to set
            # things like icon hashes.
            kwargs_from_result = result if isinstance(result, dict) else {}
            task_log.info(
                'Delaying setting modified on object: %s, %s'
                % (obj_info[0], obj_info[1])
            )
            # Execute set_modified_on_object in NFS_LAG_DELAY seconds. This
            # allows us to make sure any changes have been written to disk
            # before changing modification date and/or image hashes stored
            # on objects - otherwise we could end up caching an old version
            # of an image on CDNs/clients for a very long time.
            set_modified_on_object.apply_async(
                args=obj_info,
                kwargs=kwargs_from_result,
                eta=(
                    datetime.datetime.now()
                    + datetime.timedelta(seconds=settings.NFS_LAG_DELAY)
                ),
            )
        return result

    return wrapper
def allow_cross_site_request(f):
    """Allow other sites to access this resource, see
    https://developer.mozilla.org/en/HTTP_access_control."""
    @functools.wraps(f)
    def wrapper(request, *args, **kw):
        response = f(request, *args, **kw)
        # If Access-Control-Allow-Credentials isn't set, the browser won't
        # return data required cookies to see. This is a good thing, let's
        # keep it that way. (Fix: this used to be a stray string literal -
        # a no-op expression statement - instead of a comment.)
        response['Access-Control-Allow-Origin'] = '*'
        response['Access-Control-Allow-Methods'] = 'GET'
        return response

    return wrapper
| {
"content_hash": "5e665fd87b96d58d92f165bc3c842781",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 78,
"avg_line_length": 29.863387978142075,
"alnum_prop": 0.605672461116194,
"repo_name": "mozilla/olympia",
"id": "5158e8465b271b0da3ab10f3a364a2464288bd15",
"size": "5465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/amo/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Introduces HTMLFile as a proxy model over projects.ImportedFile: no new
    # table is created, only a new Python-level class (hence the empty
    # ``fields`` list and ``'proxy': True``).

    dependencies = [
        ('projects', '0036_remove-auto-doctype'),
    ]

    operations = [
        migrations.CreateModel(
            name='HTMLFile',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
            },
            bases=('projects.importedfile',),
        ),
    ]
| {
"content_hash": "b53a304acf7a4b627efb3712e63b6c56",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 49,
"avg_line_length": 20.52173913043478,
"alnum_prop": 0.4872881355932203,
"repo_name": "rtfd/readthedocs.org",
"id": "dee21a140bd0b1c646c533dc0005d5f190018253",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/projects/migrations/0037_add_htmlfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
import paho.mqtt.client as mqtt
import json
import mqttsqlite.settings.private_settings as Settings
# Broker connection settings pulled from the private settings module.
MANAGEMENT_PASSWORD = Settings.QUERY_PASSWORD
MQTT_HOST = Settings.MQTT_HOST
MQTT_PORT = Settings.MQTT_PORT
ROOT_TOPIC = Settings.ROOT_TOPIC

# Topic whose logged values we want, plus the request payload the logger
# expects: a client id (used to match the response back to us), the topic,
# a time window in minutes ("options") and the query password.
desired_topic = 'salon/humedad'
payload = {}
payload['client'] = 'simple_example'
payload['topic'] = desired_topic
payload['options'] = 20
payload['password'] = MANAGEMENT_PASSWORD
def on_connect(client, userdata, flags, rc):
    # On connection: subscribe to the logger's response topic first, then
    # publish the query request under ROOT_TOPIC + 'log/query/minutes'.
    client_topic = ROOT_TOPIC + 'log/query/minutes'
    client.subscribe(ROOT_TOPIC + 'response')
    client.publish(client_topic, json.dumps(payload))
def on_message(client, userdata, msg):
    """Print the logger's reply addressed to this client, then disconnect."""
    reply = json.loads(msg.payload)
    # Ignore responses addressed to other clients (or with no client field).
    if reply.get('client') != payload['client']:
        return
    print('Received Message from Logger: ')
    print(reply)
    client.disconnect()
# Wire up the callbacks, connect to the broker, and block until the response
# handler calls client.disconnect().
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_forever()
| {
"content_hash": "c652e3009911f6ff2eef4afcfd4ea8b6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 56,
"avg_line_length": 28.07894736842105,
"alnum_prop": 0.7104029990627929,
"repo_name": "rdiaz82/mqttSqlLite",
"id": "b4168883cf99b501bb831650d0683b3047da0387",
"size": "1067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/query_topic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70638"
}
],
"symlink_target": ""
} |
from django.test import SimpleTestCase
from corehq.apps.app_manager.models import Application, AutoSelectCase, AUTO_SELECT_USER, AUTO_SELECT_CASE, \
LoadUpdateAction, AUTO_SELECT_FIXTURE, AUTO_SELECT_RAW, WORKFLOW_MODULE, DetailColumn, WORKFLOW_PREVIOUS
from corehq.apps.app_manager.tests.util import TestFileMixin
from corehq.apps.app_manager.suite_xml import dot_interpolate
from lxml import etree
import commcare_translations
class SuiteTest(SimpleTestCase, TestFileMixin):
    """Golden-file tests for suite.xml generation.

    Each test wraps a JSON app fixture into an Application, generates the
    suite, and compares it with an expected XML fixture under data/suite/.
    """

    # Fixture directory used by TestFileMixin's get_json()/get_xml().
    file_path = ('data', 'suite')

    def assertHasAllStrings(self, app, strings):
        """Assert every <locale id="..."> referenced in the suite XML *app*
        has a non-blank entry in the app-strings file *strings*."""
        et = etree.XML(app)
        locale_elems = et.findall(".//locale/[@id]")
        locale_strings = [elem.attrib['id'] for elem in locale_elems]
        app_strings = commcare_translations.loads(strings)
        for string in locale_strings:
            if string not in app_strings:
                raise AssertionError("App strings did not contain %s" % string)
            if not app_strings.get(string, '').strip():
                raise AssertionError("App strings has blank entry for %s" % string)

    def _test_generic_suite(self, app_tag, suite_tag=None):
        # Generate a suite from <app_tag>.json and diff it against
        # <suite_tag>.xml (defaults to the same tag).
        suite_tag = suite_tag or app_tag
        app = Application.wrap(self.get_json(app_tag))
        # print app.create_suite()
        self.assertXmlEqual(self.get_xml(suite_tag), app.create_suite())

    def _test_app_strings(self, app_tag):
        # Every locale id in the generated suite must resolve to a string.
        app = Application.wrap(self.get_json(app_tag))
        app_xml = app.create_suite()
        app_strings = app.create_app_strings('default')
        self.assertHasAllStrings(app_xml, app_strings)

    def test_normal_suite(self):
        self._test_generic_suite('app', 'normal-suite')

    def test_tiered_select(self):
        self._test_generic_suite('tiered-select', 'tiered-select')

    def test_3_tiered_select(self):
        self._test_generic_suite('tiered-select-3', 'tiered-select-3')

    def test_multisort_suite(self):
        self._test_generic_suite('multi-sort', 'multi-sort')

    def test_sort_only_value_suite(self):
        self._test_generic_suite('sort-only-value', 'sort-only-value')
        self._test_app_strings('sort-only-value')

    def test_callcenter_suite(self):
        self._test_generic_suite('call-center')

    def test_careplan_suite(self):
        self._test_generic_suite('careplan')

    def test_careplan_suite_own_module(self):
        # Same app, but the careplan module rendered as its own module.
        app = Application.wrap(self.get_json('careplan'))
        app.get_module(1).display_separately = True
        self.assertXmlEqual(self.get_xml('careplan-own-module'), app.create_suite())

    def test_advanced_suite(self):
        self._test_generic_suite('suite-advanced')

    def test_advanced_suite_details(self):
        # Point each load/update action at an explicit details module.
        app = Application.wrap(self.get_json('suite-advanced'))
        clinic_module_id = app.get_module(0).unique_id
        other_module_id = app.get_module(1).unique_id
        app.get_module(1).get_form(0).actions.load_update_cases[0].details_module = clinic_module_id
        app.get_module(1).get_form(1).actions.load_update_cases[0].details_module = other_module_id
        self.assertXmlEqual(self.get_xml('suite-advanced-details'), app.create_suite())

    def test_advanced_suite_case_list_filter(self):
        # A 'filter' column on the referenced module should surface as a
        # case-list filter in the generated suite.
        app = Application.wrap(self.get_json('suite-advanced'))
        clinic_module = app.get_module(0)
        clinic_module.case_details.short.columns.append(DetailColumn(
            header={"en": "Filter"},
            format='filter',
            filter_xpath=". = 'danny'",
            field='filter'
        ))
        clinic_module_id = clinic_module.unique_id
        app.get_module(1).get_form(0).actions.load_update_cases[0].details_module = clinic_module_id
        self.assertXmlEqual(self.get_xml('suite-advanced-filter'), app.create_suite())

    def test_advanced_suite_commtrack(self):
        app = Application.wrap(self.get_json('suite-advanced'))
        app.commtrack_enabled = True
        self.assertXmlEqual(self.get_xml('suite-advanced-commtrack'), app.create_suite())

    def test_advanced_suite_auto_select_user(self):
        # Auto-select the case from the logged-in user's data.
        app = Application.wrap(self.get_json('suite-advanced'))
        app.get_module(1).get_form(0).actions.load_update_cases[0].auto_select = AutoSelectCase(
            mode=AUTO_SELECT_USER,
            value_key='case_id'
        )
        self.assertXmlEqual(self.get_xml('suite-advanced-autoselect-user'), app.create_suite())

    def test_advanced_suite_auto_select_fixture(self):
        # Auto-select the case from a lookup-table (fixture) field.
        app = Application.wrap(self.get_json('suite-advanced'))
        app.get_module(1).get_form(0).actions.load_update_cases[0].auto_select = AutoSelectCase(
            mode=AUTO_SELECT_FIXTURE,
            value_source='table_tag',
            value_key='field_name'
        )
        self.assertXmlEqual(self.get_xml('suite-advanced-autoselect-fixture'), app.create_suite())

    def test_advanced_suite_auto_select_raw(self):
        # Auto-select via a raw XPath expression supplied by the app builder.
        app = Application.wrap(self.get_json('suite-advanced'))
        app.get_module(1).get_form(0).actions.load_update_cases[0].auto_select = AutoSelectCase(
            mode=AUTO_SELECT_RAW,
            value_key=("some xpath expression "
                       "containing instance('casedb') "
                       "and instance('commcaresession')")
        )
        self.assertXmlEqual(self.get_xml('suite-advanced-autoselect-raw'), app.create_suite())

    def test_advanced_suite_auto_select_case(self):
        # Auto-select a case via an index on a previously loaded case.
        app = Application.wrap(self.get_json('suite-advanced'))
        load_update_cases = app.get_module(1).get_form(0).actions.load_update_cases
        load_update_cases.append(LoadUpdateAction(
            case_tag='auto_selected',
            auto_select=AutoSelectCase(
                mode=AUTO_SELECT_CASE,
                value_source=load_update_cases[0].case_tag,
                value_key='case_id_index'
            )
        ))
        self.assertXmlEqual(self.get_xml('suite-advanced-autoselect-case'), app.create_suite())

    def test_advanced_suite_auto_select_with_filter(self):
        """
        Form filtering should be done using the last 'non-autoload' case being loaded.
        """
        app = Application.wrap(self.get_json('suite-advanced'))
        app.get_module(1).get_form(0).actions.load_update_cases.append(LoadUpdateAction(
            case_tag='autoload',
            auto_select=AutoSelectCase(
                mode=AUTO_SELECT_USER,
                value_key='case_id'
            )
        ))
        form = app.get_module(1).get_form(0)
        form.form_filter = "./edd = '123'"
        self.assertXmlEqual(self.get_xml('suite-advanced-autoselect-with-filter'), app.create_suite())

    def test_case_assertions(self):
        self._test_generic_suite('app_case_sharing', 'suite-case-sharing')

    def test_no_case_assertions(self):
        self._test_generic_suite('app_no_case_sharing', 'suite-no-case-sharing')

    def test_picture_format(self):
        self._test_generic_suite('app_picture_format', 'suite-picture-format')

    def test_audio_format(self):
        self._test_generic_suite('app_audio_format', 'suite-audio-format')

    def test_attached_picture(self):
        self._test_generic_suite('app_attached_image', 'suite-attached-image')

    def test_form_workflow_previous(self):
        """
        m0 - standard module - no case
            f0 - no case management
            f1 - no case management
        m1 - standard module - patient case
            f0 - register case
            f1 - update case
        m2 - standard module - patient case
            f0 - update case
            f1 - update case
        m3 - standard module - child case
            f0 - update child case
            f1 - update child case
        m4 - advanced module - patient case
            f0 - load a -> b
            f1 - load a -> b -> c
            f2 - load a -> b -> autoselect
        """
        self._test_generic_suite('suite-workflow', 'suite-workflow-previous')

    def test_form_workflow_module(self):
        # Every form links back to its module after submission.
        app = Application.wrap(self.get_json('suite-workflow'))
        for module in app.get_modules():
            for form in module.get_forms():
                form.post_form_workflow = WORKFLOW_MODULE
        self.assertXmlEqual(self.get_xml('suite-workflow-module'), app.create_suite())

    def test_form_workflow_root(self):
        # Modules 1 and 2 rendered at the root menu level.
        # app = Application.wrap(self.get_json('suite-workflow-root'))
        app = Application.wrap(self.get_json('suite-workflow'))
        for m in [1, 2]:
            module = app.get_module(m)
            module.put_in_root = True
        self.assertXmlEqual(self.get_xml('suite-workflow-root'), app.create_suite())

    def test_owner_name(self):
        self._test_generic_suite('owner-name')

    def test_form_filter(self):
        """
        Ensure form filter gets added correctly and appropriate instances get added to the entry.
        """
        app = Application.wrap(self.get_json('suite-advanced'))
        form = app.get_module(1).get_form(1)
        form.form_filter = "./edd = '123'"
        self.assertXmlEqual(self.get_xml('form-filter'), app.create_suite())
class RegexTest(SimpleTestCase):
    """Unit tests for the dot-interpolation helper in suite_xml."""

    def testRegex(self):
        """dot_interpolate() must replace a '.' path step with the given
        expression while leaving decimal literals and trailing dots alone."""
        substitution = "@case_id stuff"
        expectations = (
            ('./lmp < 570.5', '%s/lmp < 570.5'),
            ('stuff ./lmp < 570.', 'stuff %s/lmp < 570.'),
            ('.53 < hello.', '.53 < hello%s'),
        )
        for source_expr, template in expectations:
            expected = template % substitution
            self.assertEqual(
                dot_interpolate(source_expr, substitution),
                expected
            )
"content_hash": "140942fabd30b040e787eaa8c731425e",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 109,
"avg_line_length": 41.294372294372295,
"alnum_prop": 0.6216584547646504,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "26247b046cf9b5e4bbb8dc71b2e0803204bb4130",
"size": "9539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/tests/test_suite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeWrapperService.getCreativeWrappersByStatement
Tags: CreativeWrapperService.updateCreativeWrappers
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the creative wrapper to update.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
    """Set matching creative wrappers' wrapping order to 'OUTER'.

    Args:
        client: An initialized dfp.DfpClient.
        creative_wrapper_id: Id of the creative wrapper to look up; bound into
            the PQL statement as a NumberValue.
    """
    # Initialize appropriate service.
    creative_wrapper_service = client.GetService('CreativeWrapperService',
                                                 version='v201405')
    # Create statement to get a creative wrapper by ID.
    values = [{
        'key': 'creativeWrapperId',
        'value': {
            'xsi_type': 'NumberValue',
            'value': creative_wrapper_id
        }
    }]
    query = 'WHERE id = :creativeWrapperId'
    statement = dfp.FilterStatement(query, values)
    # Get creative wrappers.
    response = creative_wrapper_service.getCreativeWrappersByStatement(
        statement.ToStatement())
    if 'results' in response:
        # Flip every matched wrapper to OUTER ordering before writing back.
        updated_creative_wrappers = []
        for creative_wrapper in response['results']:
            creative_wrapper['ordering'] = 'OUTER'
            updated_creative_wrappers.append(creative_wrapper)
        # Update the creative wrappers on the server.
        creative_wrappers = creative_wrapper_service.updateCreativeWrappers(
            updated_creative_wrappers)
        # Display results.
        for creative_wrapper in creative_wrappers:
            print (('Creative wrapper with ID \'%s\' and wrapping order \'%s\' '
                    'was updated.') % (creative_wrapper['id'],
                                       creative_wrapper['ordering']))
    else:
        print 'No creative wrappers found to update.'
if __name__ == '__main__':
    # Initialize client object.
    # Credentials come from ~/googleads.yaml (see the module docstring).
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client, CREATIVE_WRAPPER_ID)
| {
"content_hash": "9d48c68fcc994f4e76a438ecc6b4d59e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 34.753846153846155,
"alnum_prop": 0.6923417441345728,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "926cb14f94d6382970cf5d445f00e5e85375ad24",
"size": "2877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201405/creative_wrapper_service/update_creative_wrappers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
class ListNode(object):
    """A singly linked list node (LeetCode scaffold)."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node; None marks the tail
# TLE:
# class Solution(object):
# def hasCycle(self, head):
# """
# :type head: ListNode
# :rtype: bool
# """
# ptr = head
# total_call = 0
# while True:
# if ptr is None:
# break
# call_count = 0
# working_ptr = head
# while call_count < total_call:
# if working_ptr == ptr:
# return True
# else:
# call_count += 1
# working_ptr = working_ptr.next
# ptr = ptr.next
# total_call += 1
# return False
class Solution(object):
    """Cycle detection via Floyd's tortoise-and-hare pointers."""

    def hasCycle(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        # A list of length 0 or 1 cannot contain a cycle.
        if head is None or head.next is None:
            return False
        tortoise, hare = head, head.next
        # The pointers can only meet if the hare laps the tortoise inside
        # a cycle; reaching the end of the list proves there is none.
        while tortoise != hare:
            if hare.next is None or hare.next.next is None or tortoise.next is None:
                return False
            tortoise = tortoise.next
            hare = hare.next.next
        return True
# Ad-hoc smoke test: empty list, single node, acyclic chain, then a chain
# with a manufactured cycle.
s = Solution()
l = None
print(s.hasCycle(l))  # False: empty list
l = ListNode(0)
print(s.hasCycle(l))  # False: single node, no cycle
l.next = ListNode(1)
l.next.next = ListNode(2)
l.next.next.next = ListNode(3)
print(s.hasCycle(l))  # False: straight chain 0->1->2->3
l.next.next.next.next = l.next
print(s.hasCycle(l))  # True: tail now loops back to node 1
"content_hash": "1ad4d82a84a7eed1a86d2ef964cecc82",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 25.413793103448278,
"alnum_prop": 0.4762550881953867,
"repo_name": "heyf/cloaked-octo-adventure",
"id": "7b6ceb657ea7bc0bd9894ac503d6ab8d9c99083f",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/141_linked-list-cycle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1225"
},
{
"name": "C++",
"bytes": "13338"
},
{
"name": "Python",
"bytes": "145134"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_dynamic_object
# Canonical server-side representation of the dynamic object under test.
OBJECT = {
    "name": "Dynamic_Object_1",
    "comments": "My Dynamic Object 1",
    "color": "yellow"
}
# Module arguments used to create the object (mirrors OBJECT).
CREATE_PAYLOAD = {
    "name": "Dynamic_Object_1",
    "comments": "My Dynamic Object 1",
    "color": "yellow"
}
# Minimal arguments for an update call.
UPDATE_PAYLOAD = {
    "name": "Dynamic_Object_1"
}
# The update payload doubles as the expected post-update object.
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
# Arguments requesting deletion of the object.
DELETE_PAYLOAD = {
    "name": "Dynamic_Object_1",
    "state": "absent"
}
# Dotted path of the api_call function to patch, and the key under which the
# module returns the object in its result dict.
function_path = 'ansible.modules.network.check_point.cp_mgmt_dynamic_object.api_call'
api_call_object = 'dynamic-object'
class TestCheckpointDynamicObject(object):
    """Exercise create/update/delete flows of cp_mgmt_dynamic_object with the
    Check Point api_call patched out."""

    module = cp_mgmt_dynamic_object

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        """Route AnsibleModule exits through the test exit/fail hooks."""
        return mocker.patch.multiple(basic.AnsibleModule,
                                     exit_json=exit_json,
                                     fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        """Stub out the HTTP-API connection used by the checkpoint helpers."""
        patched_connection = mocker.patch(
            'ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return patched_connection.return_value

    def _stub_api_call(self, mocker, response):
        # Make the module's api_call return the canned *response*.
        mocker.patch(function_path).return_value = response

    def test_create(self, mocker, connection_mock):
        self._stub_api_call(mocker, {'changed': True, api_call_object: OBJECT})
        result = self._run_module(CREATE_PAYLOAD)
        assert result['changed']
        assert OBJECT.items() == result[api_call_object].items()

    def test_create_idempotent(self, mocker, connection_mock):
        self._stub_api_call(mocker, {'changed': False, api_call_object: OBJECT})
        result = self._run_module(CREATE_PAYLOAD)
        assert not result['changed']

    def test_update(self, mocker, connection_mock):
        self._stub_api_call(
            mocker, {'changed': True, api_call_object: OBJECT_AFTER_UPDATE})
        result = self._run_module(UPDATE_PAYLOAD)
        assert result['changed']
        assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()

    def test_update_idempotent(self, mocker, connection_mock):
        self._stub_api_call(
            mocker, {'changed': False, api_call_object: OBJECT_AFTER_UPDATE})
        result = self._run_module(UPDATE_PAYLOAD)
        assert not result['changed']

    def test_delete(self, mocker, connection_mock):
        self._stub_api_call(mocker, {'changed': True})
        result = self._run_module(DELETE_PAYLOAD)
        assert result['changed']

    def test_delete_idempotent(self, mocker, connection_mock):
        self._stub_api_call(mocker, {'changed': False})
        result = self._run_module(DELETE_PAYLOAD)
        assert not result['changed']

    def _run_module(self, module_args):
        """Run the module's main() and return the JSON it exited with."""
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| {
"content_hash": "40a77f961f1396e6613bcf8ec4b2fa44",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 109,
"avg_line_length": 33.54639175257732,
"alnum_prop": 0.6760909649661955,
"repo_name": "thaim/ansible",
"id": "c2092a8a0f59efaadca1cd749b8e02792d536736",
"size": "3934",
"binary": false,
"copies": "18",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/check_point/test_cp_mgmt_dynamic_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import fcntl
import logging
import os
import psutil
from devil import devil_env
from devil.android.constants import file_system
from devil.android.valgrind_tools import base_tool
from devil.utils import cmd_helper
def _GetProcessStartTime(pid):
    # Return the creation time of process *pid*, used to disambiguate
    # recycled PIDs in the lock file.
    # NOTE(review): psutil.Process.create_time is an attribute in psutil < 2.0
    # but a method in later releases -- this assumes the old API; confirm the
    # pinned psutil version.
    return psutil.Process(pid).create_time
class _FileLock(object):
    """Context-manager file lock backed by flock(2).

    File locks are needed for cross-process synchronization when the
    multiprocessing Python module is used.
    """

    def __init__(self, path):
        self._path = path
        self._fd = -1

    def __enter__(self):
        # Creating the file on first use means callers never have to
        # pre-provision the lock path.
        fd = os.open(self._path, os.O_RDONLY | os.O_CREAT)
        if fd < 0:
            raise Exception('Could not open file %s for reading' % self._path)
        self._fd = fd
        fcntl.flock(fd, fcntl.LOCK_EX)

    def __exit__(self, _exception_type, _exception_value, traceback):
        fcntl.flock(self._fd, fcntl.LOCK_UN)
        os.close(self._fd)
class Forwarder(object):
    """Thread-safe class to manage port forwards from the device to the host."""

    # On-device install locations for the device_forwarder daemon.
    _DEVICE_FORWARDER_FOLDER = (file_system.TEST_EXECUTABLE_DIR +
                                '/forwarder/')
    _DEVICE_FORWARDER_PATH = (file_system.TEST_EXECUTABLE_DIR +
                              '/forwarder/device_forwarder')
    # Lock file serializing access across processes (see _FileLock); it also
    # records the owning PID and its start time (see _InitHostLocked).
    _LOCK_PATH = '/tmp/chrome.forwarder.lock'
    # Defined in host_forwarder_main.cc
    _HOST_FORWARDER_LOG = '/tmp/host_forwarder_log'
    # Singleton instance, created lazily by _GetInstanceLocked().
    _instance = None

    @staticmethod
    def Map(port_pairs, device, tool=None):
        """Runs the forwarder.
        Args:
          port_pairs: A list of tuples (device_port, host_port) to forward. Note
                      that you can specify 0 as a device_port, in which case a
                      port will by dynamically assigned on the device. You can
                      get the number of the assigned port using the
                      DevicePortForHostPort method.
          device: A DeviceUtils instance.
          tool: Tool class to use to get wrapper, if necessary, for executing the
                forwarder (see valgrind_tools.py).
        Raises:
          Exception on failure to forward the port.
        """
        if not tool:
            tool = base_tool.BaseTool()
        with _FileLock(Forwarder._LOCK_PATH):
            instance = Forwarder._GetInstanceLocked(tool)
            instance._InitDeviceLocked(device, tool)
            device_serial = str(device)
            # One host_forwarder invocation per (device_port, host_port) pair.
            redirection_commands = [
                ['--adb=' + devil_env.config.FetchPath('adb'),
                 '--serial-id=' + device_serial,
                 '--map', str(device_port), str(host_port)]
                for device_port, host_port in port_pairs]
            logging.info('Forwarding using commands: %s', redirection_commands)
            for redirection_command in redirection_commands:
                try:
                    (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
                        [instance._host_forwarder_path] + redirection_command)
                except OSError as e:
                    # errno 2 (ENOENT): the host_forwarder binary is missing.
                    if e.errno == 2:
                        raise Exception('Unable to start host forwarder. Make sure you have'
                                        ' built host_forwarder.')
                    else: raise
                if exit_code != 0:
                    Forwarder._KillDeviceLocked(device, tool)
                    # Log alive forwarders
                    ps_out = device.RunShellCommand(['ps'])
                    logging.info('Currently running device_forwarders:')
                    for line in ps_out:
                        if 'device_forwarder' in line:
                            logging.info('  %s', line)
                    raise Exception('%s exited with %d:\n%s' % (
                        instance._host_forwarder_path, exit_code, '\n'.join(output)))
                # On success host_forwarder prints "device_port:host_port"
                # (device_port may have been dynamically assigned).
                tokens = output.split(':')
                if len(tokens) != 2:
                    raise Exception('Unexpected host forwarder output "%s", '
                                    'expected "device_port:host_port"' % output)
                device_port = int(tokens[0])
                host_port = int(tokens[1])
                serial_with_port = (device_serial, device_port)
                instance._device_to_host_port_map[serial_with_port] = host_port
                instance._host_to_device_port_map[host_port] = serial_with_port
                logging.info('Forwarding device port: %d to host port: %d.',
                             device_port, host_port)

    @staticmethod
    def UnmapDevicePort(device_port, device):
        """Unmaps a previously forwarded device port.
        Args:
          device: A DeviceUtils instance.
          device_port: A previously forwarded port (through Map()).
        """
        with _FileLock(Forwarder._LOCK_PATH):
            Forwarder._UnmapDevicePortLocked(device_port, device)

    @staticmethod
    def UnmapAllDevicePorts(device):
        """Unmaps all the previously forwarded ports for the provided device.
        Args:
          device: A DeviceUtils instance.
          port_pairs: A list of tuples (device_port, host_port) to unmap.
        """
        with _FileLock(Forwarder._LOCK_PATH):
            # Nothing to do if no forwards were ever set up for this device.
            if not Forwarder._instance:
                return
            adb_serial = str(device)
            if adb_serial not in Forwarder._instance._initialized_devices:
                return
            port_map = Forwarder._GetInstanceLocked(
                None)._device_to_host_port_map
            # Iterate over keys() (a copy) because unmapping mutates the map.
            for (device_serial, device_port) in port_map.keys():
                if adb_serial == device_serial:
                    Forwarder._UnmapDevicePortLocked(device_port, device)
            # There are no more ports mapped, kill the device_forwarder.
            tool = base_tool.BaseTool()
            Forwarder._KillDeviceLocked(device, tool)

    @staticmethod
    def DevicePortForHostPort(host_port):
        """Returns the device port that corresponds to a given host port."""
        with _FileLock(Forwarder._LOCK_PATH):
            _, device_port = Forwarder._GetInstanceLocked(
                None)._host_to_device_port_map.get(host_port)
            return device_port

    @staticmethod
    def RemoveHostLog():
        # Delete the host forwarder's log file, if present.
        if os.path.exists(Forwarder._HOST_FORWARDER_LOG):
            os.unlink(Forwarder._HOST_FORWARDER_LOG)

    @staticmethod
    def GetHostLog():
        # Return the host forwarder's log contents ('' if no log exists).
        # NOTE(review): file() is the Python 2-only builtin; this module is
        # Python 2 code throughout.
        if not os.path.exists(Forwarder._HOST_FORWARDER_LOG):
            return ''
        with file(Forwarder._HOST_FORWARDER_LOG, 'r') as f:
            return f.read()

    @staticmethod
    def _GetInstanceLocked(tool):
        """Returns the singleton instance.
        Note that the global lock must be acquired before calling this method.
        Args:
          tool: Tool class to use to get wrapper, if necessary, for executing the
                forwarder (see valgrind_tools.py).
        """
        if not Forwarder._instance:
            Forwarder._instance = Forwarder(tool)
        return Forwarder._instance

    def __init__(self, tool):
        """Constructs a new instance of Forwarder.
        Note that Forwarder is a singleton therefore this constructor should be
        called only once.
        Args:
          tool: Tool class to use to get wrapper, if necessary, for executing the
                forwarder (see valgrind_tools.py).
        """
        assert not Forwarder._instance
        self._tool = tool
        self._initialized_devices = set()
        # Maps (device_serial, device_port) <-> host_port in both directions.
        self._device_to_host_port_map = dict()
        self._host_to_device_port_map = dict()
        self._host_forwarder_path = devil_env.config.FetchPath('forwarder_host')
        assert os.path.exists(self._host_forwarder_path), 'Please build forwarder2'
        self._InitHostLocked()

    @staticmethod
    def _UnmapDevicePortLocked(device_port, device):
        """Internal method used by UnmapDevicePort().
        Note that the global lock must be acquired before calling this method.
        """
        instance = Forwarder._GetInstanceLocked(None)
        serial = str(device)
        serial_with_port = (serial, device_port)
        if not serial_with_port in instance._device_to_host_port_map:
            logging.error('Trying to unmap non-forwarded port %d', device_port)
            return
        redirection_command = ['--adb=' + devil_env.config.FetchPath('adb'),
                               '--serial-id=' + serial,
                               '--unmap', str(device_port)]
        logging.info('Undo forwarding using command: %s', redirection_command)
        (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
            [instance._host_forwarder_path] + redirection_command)
        if exit_code != 0:
            # Unmap failures are logged but not fatal; the bookkeeping below
            # is cleaned up regardless.
            logging.error(
                '%s exited with %d:\n%s',
                instance._host_forwarder_path, exit_code, '\n'.join(output))
        host_port = instance._device_to_host_port_map[serial_with_port]
        del instance._device_to_host_port_map[serial_with_port]
        del instance._host_to_device_port_map[host_port]

    @staticmethod
    def _GetPidForLock():
        """Returns the PID used for host_forwarder initialization.
        The PID of the "sharder" is used to handle multiprocessing. The "sharder"
        is the initial process that forks that is the parent process.
        """
        return os.getpgrp()

    def _InitHostLocked(self):
        """Initializes the host forwarder daemon.
        Note that the global lock must be acquired before calling this method. This
        method kills any existing host_forwarder process that could be stale.
        """
        # See if the host_forwarder daemon was already initialized by a concurrent
        # process or thread (in case multi-process sharding is not used).
        pid_for_lock = Forwarder._GetPidForLock()
        fd = os.open(Forwarder._LOCK_PATH, os.O_RDWR | os.O_CREAT)
        with os.fdopen(fd, 'r+') as pid_file:
            # Lock file records "<pgid>:<start time>" of the initializing
            # process; a match means the daemon is already ours and alive.
            pid_with_start_time = pid_file.readline()
            if pid_with_start_time:
                (pid, process_start_time) = pid_with_start_time.split(':')
                if pid == str(pid_for_lock):
                    if process_start_time == str(_GetProcessStartTime(pid_for_lock)):
                        return
            self._KillHostLocked()
            pid_file.seek(0)
            pid_file.write(
                '%s:%s' % (pid_for_lock, str(_GetProcessStartTime(pid_for_lock))))
            pid_file.truncate()

    def _InitDeviceLocked(self, device, tool):
        """Initializes the device_forwarder daemon for a specific device (once).
        Note that the global lock must be acquired before calling this method. This
        method kills any existing device_forwarder daemon on the device that could
        be stale, pushes the latest version of the daemon (to the device) and starts
        it.
        Args:
          device: A DeviceUtils instance.
          tool: Tool class to use to get wrapper, if necessary, for executing the
                forwarder (see valgrind_tools.py).
        """
        device_serial = str(device)
        if device_serial in self._initialized_devices:
            return
        Forwarder._KillDeviceLocked(device, tool)
        forwarder_device_path_on_host = devil_env.config.FetchPath(
            'forwarder_device', device=device)
        # The fetched path may be a directory of per-ABI binaries or a single
        # binary; pick the matching on-device destination.
        forwarder_device_path_on_device = (
            Forwarder._DEVICE_FORWARDER_FOLDER
            if os.path.isdir(forwarder_device_path_on_host)
            else Forwarder._DEVICE_FORWARDER_PATH)
        device.PushChangedFiles([(
            forwarder_device_path_on_host,
            forwarder_device_path_on_device)])
        cmd = '%s %s' % (tool.GetUtilWrapper(), Forwarder._DEVICE_FORWARDER_PATH)
        device.RunShellCommand(
            cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER},
            check_return=True)
        self._initialized_devices.add(device_serial)

    def _KillHostLocked(self):
        """Kills the forwarder process running on the host.
        Note that the global lock must be acquired before calling this method.
        """
        logging.info('Killing host_forwarder.')
        (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
            [self._host_forwarder_path, '--kill-server'])
        if exit_code != 0:
            # Graceful shutdown failed; fall back to pkill -9.
            (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
                ['pkill', '-9', 'host_forwarder'])
            if exit_code != 0:
                raise Exception('%s exited with %d:\n%s' % (
                    self._host_forwarder_path, exit_code, '\n'.join(output)))

    @staticmethod
    def _KillDeviceLocked(device, tool):
        """Kills the forwarder process running on the device.
        Note that the global lock must be acquired before calling this method.
        Args:
          device: Instance of DeviceUtils for talking to the device.
          tool: Wrapper tool (e.g. valgrind) that can be used to execute the device
                forwarder (see valgrind_tools.py).
        """
        logging.info('Killing device_forwarder.')
        Forwarder._instance._initialized_devices.discard(str(device))
        if not device.FileExists(Forwarder._DEVICE_FORWARDER_PATH):
            return
        cmd = '%s %s --kill-server' % (tool.GetUtilWrapper(),
                                       Forwarder._DEVICE_FORWARDER_PATH)
        device.RunShellCommand(
            cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER},
            check_return=True)
| {
"content_hash": "7339c70800ae4863f57fa16af6fa219c",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 80,
"avg_line_length": 37.784615384615385,
"alnum_prop": 0.6515472312703583,
"repo_name": "XiaosongWei/chromium-crosswalk",
"id": "8794797ffcebba3b7dc033a6d66c28fe379e7527",
"size": "12472",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "build/android/devil/android/forwarder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from locust import HttpUser, task, run_single_user
class QuickstartUser(HttpUser):
    """Single-endpoint locust user intended for running under a debugger."""
    host = "http://localhost"

    @task
    def hello_world(self):
        # catch_response=True defers pass/fail judgement so the response can
        # be inspected before the context manager exits.
        with self.client.get("/hello", catch_response=True) as resp:
            pass  # maybe set a breakpoint here to analyze the resp object?
# if launched directly, e.g. "python3 debugging.py", not "locust -f debugging.py"
if __name__ == "__main__":
    run_single_user(QuickstartUser)
| {
"content_hash": "610d9d1fa0295b02ad203b092f34e0f9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6651685393258427,
"repo_name": "mbeacom/locust",
"id": "43e799a80e3e4e78b89e2f7b9d41770095840655",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/debugging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "732"
},
{
"name": "HTML",
"bytes": "30187"
},
{
"name": "JavaScript",
"bytes": "17229"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Python",
"bytes": "809070"
},
{
"name": "Sass",
"bytes": "10379"
},
{
"name": "Shell",
"bytes": "3452"
}
],
"symlink_target": ""
} |
"""
File: <interest_rate.py>
Copyright (c) 2016 <Krystal Lee>
License: MIT
<Find the value of euros after 3 years with 5 percent interest with 1000 euros.>
"""
#Exercise 1.6
#Let p be a bank's interest_rate in percent per year. An initial amount A has grown to A(1+p/100)**n after n years.
#Make a program for computing how much money 1000 euros have grown to after three years with 5 percent interest_rate.
#p=interest rate in percent per year
#n=number of years
#A=initial amount
A=1000 #euros
n=3.0 #years
p=5.0 #percent
result = A*(1+p/100)**n
print "After %d years, the value will grow to be %.2f euros with %d percent interest with %d euros." % (n, result, p, A)
| {
"content_hash": "f123fd1087c4581514e7f8a610029bf9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 120,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.6914285714285714,
"repo_name": "chapman-cpsc-230/hw1-lee301",
"id": "2eb4e3a693b4db93665e9646811324eb24081b05",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interest_rate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1784"
}
],
"symlink_target": ""
} |
"""
Tests by file. Runs through a list of *.xls files, and expects that the output for a *.xml
with a matching prefix before the . is as expected. Possibly risky: all tests in this file
are defined according to matching files.
"""
from lxml import etree
from formencode.doctest_xml_compare import xml_compare
from unittest import TestCase
import pyxform
from pyxform import xls2json
import os, sys
import utils
import codecs
import sys
class main_test(TestCase):
    """Convert each listed *.xls fixture to an XForm and diff the result
    against the *.xml fixture with the same prefix."""

    def runTest(self):
        for excel_name in ["instance_xmlns_test.xls"]:
            excel_path = utils.path_to_text_fixture(excel_name)
            # Derive the output and expected-XForm paths from the .xls name.
            fixture_dir, basename = os.path.split(excel_path)
            stem, ext = os.path.splitext(basename)
            output_path = os.path.join(fixture_dir, stem + "_output.xml")
            expected_path = os.path.join(fixture_dir, stem + ".xml")
            # Convert the spreadsheet to an XForm on disk.
            survey_dict = xls2json.parse_file_to_json(excel_path)
            survey = pyxform.create_survey_element_from_dict(survey_dict)
            survey.print_xform_to_file(output_path)
            # Compare the in-memory XForm against the expected fixture.
            with codecs.open(expected_path, 'rb', encoding="utf-8") as expected_file:
                expected = etree.fromstring(expected_file.read())
            actual = etree.fromstring(survey.to_xml())

            def reporter(message):
                sys.stdout.write(message + "\n")

            self.assertTrue(xml_compare(expected, actual, reporter=reporter))
            os.remove(output_path)
| {
"content_hash": "931c764a7add7e2fc1c1833f3117bbc8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 94,
"avg_line_length": 42.21951219512195,
"alnum_prop": 0.6452917388792605,
"repo_name": "dorey/pyxform",
"id": "eab88b0d2f119413ac914142ae9e9a8f48c56962",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyxform/tests/tests_by_file.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "328168"
}
],
"symlink_target": ""
} |
from CIM15.IEC61970.Wires.Conductor import Conductor
class DCLineSegment(Conductor):
"""A wire or combination of wires not insulated from one another, with consistent electrical characteristics, used to carry direct current between points in the DC region of the power system.A wire or combination of wires not insulated from one another, with consistent electrical characteristics, used to carry direct current between points in the DC region of the power system.
"""
def __init__(self, dcSegmentInductance=0.0, dcSegmentResistance=0.0, *args, **kw_args):
"""Initialises a new 'DCLineSegment' instance.
@param dcSegmentInductance: Inductance of the DC line segment.
@param dcSegmentResistance: Resistance of the DC line segment.
"""
#: Inductance of the DC line segment.
self.dcSegmentInductance = dcSegmentInductance
#: Resistance of the DC line segment.
self.dcSegmentResistance = dcSegmentResistance
super(DCLineSegment, self).__init__(*args, **kw_args)
_attrs = ["dcSegmentInductance", "dcSegmentResistance"]
_attr_types = {"dcSegmentInductance": float, "dcSegmentResistance": float}
_defaults = {"dcSegmentInductance": 0.0, "dcSegmentResistance": 0.0}
_enums = {}
_refs = []
_many_refs = []
| {
"content_hash": "2a3c6a132666319d1aef1ef62c1a6c7f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 383,
"avg_line_length": 48.666666666666664,
"alnum_prop": 0.708523592085236,
"repo_name": "rwl/PyCIM",
"id": "7ed1d62656b24bcf4c2144b7c3ca74a9a4daf608",
"size": "2414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Wires/DCLineSegment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
__author__ = 'stanley'
import json
import webapp2
from google.appengine.api import users
from init import *
from domain.user import *
from util.sanity_check import*
class LanguageHandler(webapp2.RequestHandler):
    """CRUD handler for the languages section of a user's profile.

    GET renders the languages snippet, PUT adds a language, DELETE removes
    one, and POST either edits a single language or re-orders the whole list
    (distinguished by the presence of the 'extra_param' argument).
    Validation failures return without writing a response body, matching the
    behaviour of the other checks in this handler.
    """

    def get(self):
        """Render the languages HTML snippet."""
        template = JINJA_ENVIRONMENT.get_template('templates/snippets/languages.html')
        self.response.write(template.render())

    def put(self):
        """Create a new language from the 'name'/'proficiency' arguments."""
        usr = user_key(users.get_current_user().email()).get()
        if not user_is_logged_in(usr):
            return
        language = Language()
        language.id = usr.total_num_of_elems
        language.name = str(self.request.get('name')).strip()
        # BUG FIX: the original used `is ''`, which tests object identity,
        # not equality; rely on string truthiness instead.
        if not language.name:
            return
        language.proficiency = str(self.request.get('proficiency')).strip()
        if not language.proficiency:
            return
        usr.append_language(language)
        usr.total_num_of_elems += 1
        usr.put()
        self._write_success({'successful': True, 'id': language.id})

    def delete(self):
        """Remove the language whose id is given in the 'l_id' argument."""
        usr = user_key(users.get_current_user().email()).get()
        if not user_is_logged_in(usr):
            return
        if attr_is_not_in_request(self.request, 'l_id'):
            return
        lang_id = self._int_arg('l_id')
        if lang_id is None:
            return
        desired = self._find_language(usr, lang_id)
        if desired is None:
            return
        usr.languages.remove(desired)
        usr.put()
        self._write_success({'successful': True})

    def post(self):
        """Edit one language, or re-order the list when 'extra_param' is set."""
        usr = user_key(users.get_current_user().email()).get()
        if not user_is_logged_in(usr):
            return
        if attr_is_not_in_request(self.request, 'extra_param'):
            status = self.edit(usr)
        else:
            status = self.sort(usr)
        if status > 0:
            return
        self._write_success({'successful': True})

    def sort(self, usr):
        """Re-order usr.languages to match the 'sorted_ids' argument.

        Returns 0 on success, 1 on any validation failure.
        """
        if attr_is_not_in_request(self.request, 'sorted_ids'):
            return 1
        sorted_ids = str(self.request.get('sorted_ids')).strip().split(',')
        mapping = dict((str(x.id), x) for x in usr.languages)
        # ROBUSTNESS FIX: require an exact permutation of the existing ids.
        # Previously an unknown id raised KeyError (an HTTP 500) and a short
        # list silently dropped languages from the profile.
        if sorted(mapping.keys()) != sorted(sorted_ids):
            return 1
        usr.languages[:] = [mapping[x] for x in sorted_ids]
        usr.put()
        return 0

    def edit(self, usr):
        """Update name/proficiency of the language given by 'l_id'.

        Returns 0 on success, 1 on any validation failure.
        """
        if attr_is_not_in_request(self.request, 'l_id'):
            return 1
        l_id = self._int_arg('l_id')
        if l_id is None:
            return 1
        desired = self._find_language(usr, l_id)
        if desired is None:
            return 1
        name = str(self.request.get('name')).strip()
        # BUG FIX: `is ''` identity comparisons replaced with truthiness;
        # also validate both fields before mutating the entity.
        if not name:
            return 1
        proficiency = str(self.request.get('proficiency')).strip()
        if not proficiency:
            return 1
        desired.name = name
        desired.proficiency = proficiency
        usr.put()
        return 0

    @staticmethod
    def _find_language(usr, lang_id):
        """Return the user's language with the given id, or None."""
        for lang in usr.languages:
            if lang.id == lang_id:
                return lang
        return None

    def _int_arg(self, name):
        """Return the named request argument as an int, or None if invalid."""
        try:
            return int(str(self.request.get(name)).strip())
        except ValueError:
            return None

    def _write_success(self, payload):
        """Write `payload` as a JSON response."""
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(payload))
"content_hash": "4543343fa1d66ba96a03c207d57708bd",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 90,
"avg_line_length": 27.515625,
"alnum_prop": 0.5545144804088586,
"repo_name": "nimadini/Teammate",
"id": "dddc8af2595175994a9a3f80b61148e65a53f0c4",
"size": "3522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/home/language.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "89237"
},
{
"name": "HTML",
"bytes": "83898"
},
{
"name": "JavaScript",
"bytes": "127816"
},
{
"name": "Python",
"bytes": "60887"
}
],
"symlink_target": ""
} |
"""
Feature extractor class for Speech2Text
"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor.
    This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
    should refer to this superclass for more information regarding those methods.
    This class extracts mel-filter bank features from raw speech using TorchAudio and applies utterance-level cepstral
    mean and variance normalization to the extracted features.
    Args:
        feature_size (`int`, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, defaults to 16000):
            The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz).
        num_mel_bins (`int`, defaults to 80):
            Number of Mel-frequency bins.
        padding_value (`float`, defaults to 0.0):
            The value that is used to fill the padding vectors.
        do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.
        normalize_means (`bool`, *optional*, defaults to `True`):
            Whether or not to zero-mean normalize the extracted features.
        normalize_vars (`bool`, *optional*, defaults to `True`):
            Whether or not to unit-variance normalize the extracted features.
    """
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # Attention masks are needed to know which frames are real (vs padding)
        # during normalization, so return them by default.
        self.return_attention_mask = True
    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
        and hence the waveform should not be normalized before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        # fbank expects a (channel, time) tensor, hence the unsqueeze.
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Apply utterance-level cepstral mean/variance normalization to `x`.

        Statistics are computed over the first `input_length` frames only,
        applied to the whole array, and trailing (padded) frames are then
        reset to `padding_value`.
        """
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            # NOTE(review): a zero std (constant feature column) would yield
            # inf/nan here — presumably real fbank features never are constant.
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply `utterance_cmvn` to every utterance.

        Each utterance's true length is its attention-mask sum when a mask is
        given, otherwise the full array length.
        """
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).
        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                >= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific feature_extractor's default.
                [What are attention masks?](../glossary#attention-mask)
                <Tip>
                For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to
                avoid subtle bugs.
                </Tip>
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
            padding_value (`float`, defaults to 0.0):
                The value that is used to fill the padding values / vectors.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        # Batched input is a list/tuple whose first element is itself a
        # sequence (ndarray, tuple or list).
        is_batched = bool(
            isinstance(raw_speech, (list, tuple))
            and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization.
        # The attention mask is only meaningful when padding was applied.
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| {
"content_hash": "7a3372d43e2fb4c821da942fba34ccac",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 119,
"avg_line_length": 44.65573770491803,
"alnum_prop": 0.6209618208516887,
"repo_name": "huggingface/transformers",
"id": "af605626d0a848a5bd79ff7e9277d91f805a5743",
"size": "11501",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
import h5py
import mock
import os
import numpy as np
import pytest
from .mockdata import write_file
from .mockdata.xgm import XGM
from karabo_data import run_files_map, RunDirectory
def test_candidate_paths(tmp_path):
    """The map looks first in the run directory, then in the proposal scratch."""
    # 'real' paths (like /gpfs/exfel/d)
    raw_prop = tmp_path / 'raw' / 'FXE' / '201901' / 'p001234'
    run_dir = raw_prop / 'r0450'
    run_dir.mkdir(parents=True)

    # stable paths (like /gpfs/exfel/exp)
    exp_root = tmp_path / 'exp'
    prop_dir = exp_root / 'FXE' / '201901' / 'p001234'
    scratch = prop_dir / 'scratch'
    scratch.mkdir(parents=True)
    (prop_dir / 'raw').symlink_to(raw_prop)
    run_via_exp = prop_dir / 'raw' / 'r0450'

    with mock.patch.object(run_files_map, 'SCRATCH_ROOT_DIR', str(exp_root)):
        direct_map = run_files_map.RunFilesMap(str(run_dir))
        exp_map = run_files_map.RunFilesMap(str(run_via_exp))

    scratch_cache = str(scratch / '.karabo_data_maps' / 'raw_r0450.json')
    assert direct_map.candidate_paths == [
        str(run_dir / 'karabo_data_map.json'),
        scratch_cache,
    ]
    assert exp_map.candidate_paths == [
        str(run_via_exp / 'karabo_data_map.json'),
        scratch_cache,
    ]
@pytest.fixture()
def run_with_extra_file(mock_fxe_raw_run):
    """Yield (run_dir, path) after adding one extra aggregator file to the run."""
    new_file = os.path.join(mock_fxe_raw_run, 'RAW-R0450-DA02-S00000.h5')
    write_file(new_file, [XGM('FXE_TEST_XGM/DOOCS/MAIN')], ntrains=480)
    try:
        yield mock_fxe_raw_run, new_file
    finally:
        # Remove the extra file so other tests see the unmodified run.
        os.unlink(new_file)
def test_save_load_map(run_with_extra_file, tmp_path):
    """A saved run map is reloaded, and invalidated when a file changes."""
    run_dir, extra_file = run_with_extra_file
    cache_path = str(tmp_path / 'kd_test_run_map.json')

    class MapAtFixedPath(run_files_map.RunFilesMap):
        # Redirect the cache location so the test fully controls it.
        def map_paths_for_run(self, directory):
            return [cache_path]

    fresh = MapAtFixedPath(run_dir)
    assert fresh.files_data == {}
    with RunDirectory(run_dir) as run:
        fresh.save(run.files)

    reloaded = MapAtFixedPath(run_dir)
    assert reloaded.cache_file == cache_path
    info = reloaded.get(extra_file)
    assert isinstance(info['train_ids'], np.ndarray)
    assert isinstance(info['control_sources'], frozenset)
    assert isinstance(info['instrument_sources'], frozenset)

    # Modify a file; this should make the cached entry invalid.
    with h5py.File(extra_file, 'r+') as f:
        f.attrs['test_save_load_map'] = 1
    stale = MapAtFixedPath(run_dir)
    assert stale.cache_file == cache_path
    assert stale.get(extra_file) is None
| {
"content_hash": "a5b81a37dd4f39412a75c3ad751fce60",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 75,
"avg_line_length": 32.265822784810126,
"alnum_prop": 0.6418203216947823,
"repo_name": "European-XFEL/h5tools-py",
"id": "f926d47c458a5d7701ec812d2a125c62e97d2d03",
"size": "2549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "karabo_data/tests/test_run_files_map.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "711"
},
{
"name": "Python",
"bytes": "102840"
}
],
"symlink_target": ""
} |
import zlib
import logging
from contextlib import contextmanager
import redis
from redis.client import StrictRedis, StrictPipeline
class TracingPipeline(StrictPipeline):
    """StrictPipeline subclass that logs the queued command stack to the
    trace logger before executing it.
    """

    def __init__(self, *args, **kwargs):
        # Pop our private flag before the base class sees the kwargs.
        self.trace = kwargs.pop('trace', False)
        super(TracingPipeline, self).__init__(*args, **kwargs)

    def execute(self, *args, **kwargs):
        log = logging.getLogger('redis.trace.pipeline')
        # self.connection_pool.total_commands += 1
        pool = self.connection_pool
        for position, command in enumerate(self.command_stack):
            pool.total_pipeline_commands += 1
            if self.trace:
                log.debug('[%04d:%03d] %s',
                          pool.total_commands, position, command[0])
        return StrictPipeline.execute(self, *args, **kwargs)
class TracingRedis(StrictRedis):
    """StrictRedis subclass that counts commands/pipelines on its connection
    pool and optionally logs every command to the trace logger.
    """

    def __init__(self, *args, **kwargs):
        # Pop our private flag before StrictRedis sees the kwargs.
        self.trace = kwargs.pop('trace', False)
        super(TracingRedis, self).__init__(*args, **kwargs)
        self.log = logging.getLogger('redis.trace')

    def pipeline(self, context=None, transaction=True, shard_hint=None):
        """Create a TracingPipeline, counting it on the connection pool."""
        self.connection_pool.total_pipelines += 1
        if self.trace:
            self.log.debug("[%04d] PIPELINE BEGIN",
                           self.connection_pool.total_commands)
        # BUG FIX: propagate the trace flag so the pipeline's execute() can
        # log its commands; previously TracingPipeline always fell back to
        # its trace=False default, making its tracing branch dead code.
        return TracingPipeline(self.connection_pool, self.response_callbacks,
                               transaction, shard_hint, trace=self.trace)

    def execute_command(self, *args, **kwargs):
        """Run a single command, optionally logging it first."""
        # self.connection_pool.total_commands += 1
        if self.trace:
            self.log.debug('[%04d] %s', self.connection_pool.total_commands,
                           args)
        return super(TracingRedis, self).execute_command(*args, **kwargs)
def mangler(void, value):
    """mangling function to replace `encode` in subclasses of a redis
    connection: unicode becomes a UTF-8 bytestring, None becomes '',
    and anything else is stringified.
    """
    if isinstance(value, unicode):
        return value.encode('utf8')
    if value is None:
        return ''
    return str(value)
class ManglingConnection(redis.Connection):
    # TCP connection whose value encoding is handled by `mangler` above.
    encode = mangler
class ManglingUnixDomainSocketConnection(redis.UnixDomainSocketConnection):
    # UNIX-socket connection whose value encoding is handled by `mangler` above.
    encode = mangler
class CountingPool(redis.ConnectionPool):
    """Connection pool that keeps usage counters for debugging/metrics."""

    def __init__(self, *args, **kwargs):
        super(CountingPool, self).__init__(*args, **kwargs)
        self.reset_counts()
        self.log = logging.getLogger('redis.counting_pool')
        self.log.debug("Creating new pool")

    def reset_counts(self):
        """Zero all usage counters."""
        self.total_connections_leased = 0
        self.total_commands = 0
        self.total_pipelines = 0
        self.total_pipeline_commands = 0

    def get_connection(self, *args, **kwargs):
        self.total_connections_leased += 1
        connection = super(CountingPool, self).get_connection(*args, **kwargs)
        self.log.debug("Getting connection from pool (id: %s)", id(connection))
        return connection

    def make_connection(self, *args, **kwargs):
        # get_connection will call make_connection as needed, so the lease is
        # already counted there — don't count it again here.
        connection = super(CountingPool, self).make_connection(*args, **kwargs)
        self.log.debug("Making new connection (id: %s)", id(connection))
        return connection
class ConnectionManager(object):
    # Class-level cache of CountingPool objects, keyed by
    # "unix_socket_path:host:port:dbnum" and shared by every manager instance.
    connections = {}
    def __init__(self, host=None, port=None, unix_socket_path=None, dbnum=0,
                 trace=False, decode_responses=True):
        """set module level configuration
        :param host: hostname for the redis server
        :param port: port for the redis server
        :param unix_socket_path: path to a redis UNIX socket; when set it is
            used in preference to host/port
        :param dbnum: database number to connect to
        :param trace: when True, log each command to the trace logger
        :param decode_responses: What to set the decode_responses option on the
            connection objects to.
        """
        self.log = logging.getLogger('redis')
        self.host = host
        self.port = port
        self.unix_socket_path = unix_socket_path
        self.dbnum = dbnum
        self.trace = trace
        self.decode_responses = decode_responses
        sane_config = False
        if self.unix_socket_path is not None:
            self.log.info("Initializing redis connection pool on UNIX:%s "
                          "[db:%s]", self.unix_socket_path, self.dbnum)
            sane_config = True
        elif self.host is not None and self.port is not None:
            self.log.info("Initializing redis connection pool on "
                          "TCP:%s:%s [db:%s]", self.host, self.port,
                          self.dbnum)
            sane_config = True
        else:
            self.log.error("Connection pool cannot be initialized unless "
                           "either host and port are set, or "
                           "unix_socket_path is set")
        if sane_config:
            # Eagerly create (and cache) the pool for this configuration.
            self._get_pool()
        else:
            # replace our pool getter with nothing
            self._get_pool = lambda: None
    def init_app(self, app):
        """This callback can be used to initialize an application for the
        use with this database setup. Never use a database in the context
        of an application not initialized that way or connections will
        leak.
        """
        self.app = app
        self.host = app.config.get('REDIS_HOST', 'localhost')
        self.port = app.config.get('REDIS_PORT', 6379)
        if app.config.get('TESTING'):
            # Tests get their own database number so they can flush freely.
            self.dbnum = app.config.get('REDIS_TEST_DBNUM', 0)
        else:
            self.dbnum = app.config.get('REDIS_DBNUM', 0)
        self.decode_responses = app.config.get('REDIS_DECODE_RESPONSES', True)
    def __str__(self):
        # Human-readable identity, used in logs.
        if self.unix_socket_path:
            return "<ConnectionManager UNIX:%s [db:%s] %x>" % (
                self.unix_socket_path, self.dbnum, id(self))
        else:
            return "<ConnectionManager TCP:%s:%s [db:%s] %x>" % (
                self.host, self.port, self.dbnum, id(self))
    def _get_pool(self):
        """Create a connection pool using our custom connection type, and cache
        the result at the class level
        """
        cache_key = "%s:%s:%s:%s" % (self.unix_socket_path, self.host,
                                     self.port, self.dbnum)
        pool = ConnectionManager.connections.get(cache_key)
        self.log.debug('getting a pool for %s', cache_key)
        if pool is None:
            # hasn't been created yet, let's make it
            self.log.debug("creating new redis pool on %s", cache_key)
            pool_options = {
                'db': self.dbnum,
                'decode_responses': self.decode_responses,
                'encoding': 'utf-8'
            }
            # Pick the connection class (and its address options) depending
            # on whether a UNIX socket path was configured.
            if self.unix_socket_path:
                pool_options['connection_class'] = \
                    ManglingUnixDomainSocketConnection
                pool_options['path'] = self.unix_socket_path
            else:
                pool_options['connection_class'] = ManglingConnection
                pool_options['host'] = self.host
                pool_options['port'] = self.port
            pool = CountingPool(**pool_options)
            ConnectionManager.connections[cache_key] = pool
        return pool
    @classmethod
    def get_summary(klass):
        """Return the usage counters of every cached pool, keyed by cache key."""
        out = {}
        for key, pool in klass.connections.items():
            out[key] = {
                'key': key,
                'total_connections_leased': pool.total_connections_leased,
                'total_commands': pool.total_commands,
                'total_pipelines': pool.total_pipelines,
                'total_pipeline_commands': pool.total_pipeline_commands,
                'full_total': (pool.total_commands +
                               pool.total_pipeline_commands)
            }
        return out
    @classmethod
    def reset_counts(klass):
        """Zero the usage counters on every cached pool."""
        for pool in klass.connections.values():
            pool.reset_counts()
    @contextmanager
    def connection(self, context=None):
        """Context manager yielding a TracingRedis client backed by the
        cached pool. Exceptions inside the block are logged and re-raised.
        """
        con = None
        try:
            con = TracingRedis(trace=self.trace,
                               connection_pool=self._get_pool())
            yield con
        except Exception:
            self.log.exception("Something blew up in the redis "
                               "context manager")
            raise
        finally:
            self.log.debug("returning connection to pool")
            # NOTE(review): this only drops the client object; it relies on
            # the client returning connections to the pool per command.
            if con:
                del con
    @contextmanager
    def pipeline(self, context=None):
        """Context manager yielding a pipeline; executes it on clean exit,
        resets it (and re-raises) on error.
        """
        con = None
        try:
            con = TracingRedis(trace=self.trace,
                               connection_pool=self._get_pool())
            with con.pipeline() as pipe:
                try:
                    yield pipe
                    pipe.execute()
                except:
                    self.log.exception("Something blew up inside "
                                       "a pipeline context")
                    pipe.reset()
                    raise
        except Exception:
            self.log.exception("Something blew up in the redis "
                               "context manager")
            raise
        finally:
            self.log.debug("returning connection to pool")
            if con:
                del con
    def raw_connection(self):
        """Get a raw connection object from the pool, and return it to the
        client, this is most useful to third party libs or simple scripts that
        want direct access to a StrictRedis-like connection object
        You should close or del this object as soon as you're done with it.
        """
        return TracingRedis(trace=self.trace, connection_pool=self._get_pool())
class ShardedConnectionManager(object):
    """Distributes node ids over a fixed set of redis shards, each hosted by
    one of several member servers.
    """

    @classmethod
    def from_config(klass, conf):
        """Build a manager from `conf` (shard_count plus member_servers).

        Raises ValueError when a member claims shards out of bounds, two
        members claim the same shard, or some shards are left unclaimed.
        """
        # some consistency checks first...
        pool_map = {}
        shard_map = {}
        for i in xrange(conf.shard_count):
            shard_map[i] = None
        for server_info in conf.member_servers:
            # add this server to our pool map
            pool_map[server_info.url] = None
            # add all the shards this server is responsible for (ranges in
            # config are inclusive so add 1 to the end)
            shard_list = xrange(server_info.first_shard,
                                server_info.last_shard + 1)
            for shard in shard_list:
                # this shard should already be in the shard_map, but with a
                # None value.
                # BUG FIX: the original tested `not shard_map in shard`,
                # which checks membership in the wrong direction and raises
                # TypeError instead of validating the shard range.
                if shard not in shard_map:
                    raise ValueError("Shard range found in member %s is out "
                                     "of bounds for the set!" % server_info)
                elif shard_map[shard] is not None:
                    raise ValueError("Shard %s is claimed by more than 1 "
                                     "server!" % shard)
                shard_map[shard] = server_info.url
        # at this point all shards should be claimed
        if not all(shard_map.values()):
            raise ValueError("Shard set claims there are %s shards, but not "
                             "all are claimed by a member server" %
                             conf.shard_count)
        scm = klass(shard_map, pool_map)
        scm.set_ready(True)
        return scm

    @staticmethod
    def parse_url(url):
        """Parse a redis URL into kwargs for a ConnectionPool."""
        import urlparse
        u = urlparse.urlparse(url)
        return {
            'db': int(u.path.replace("/", "") or 0),
            'host': u.hostname,
            'port': int(u.port or 6379)
        }

    def __init__(self, shard_map, pool_map):
        """
        :param shard_map: dict mapping shard number -> member server url
        :param pool_map: dict keyed by member server url; values are replaced
            here with ConnectionPool objects
        """
        self.log = logging.getLogger('redis_sharded')
        self.pool_map = pool_map
        self.shard_map = shard_map
        for url in pool_map.keys():
            # a list of redis urls that we need to make pools for
            self.log.debug("mapping member server: %s", url)
            pool = redis.connection.ConnectionPool(**self.parse_url(url))
            # remember the url so StrictRedis.from_url can reuse this pool
            pool.url = url
            self.pool_map[url] = pool
        self.log.info("Shard Map created %d shards over %d servers",
                      len(shard_map), len(pool_map))
        self.__ready = False

    def set_ready(self, is_ready=True):
        """Mark the manager usable; connection()/pipeline() bail out until then."""
        self.__ready = is_ready

    def shard_node(self, node_id):
        """Consistently hash node_id, and figure out which shard the hash
        belongs to """
        # use crc32 (adler32 is faster, but has more collisions)
        return zlib.crc32("%d" % node_id) % len(self.shard_map.keys())

    def flushdb(self):
        """Destroy every shard's db
        """
        for pool in self.pool_map.values():
            con = StrictRedis.from_url(pool.url, connection_pool=pool)
            self.log.debug("flushing shard member: %s", con)
            con.flushdb()
            del con

    @contextmanager
    def connection(self, node_id):
        """Context manager yielding a client bound to node_id's shard."""
        if not self.__ready:
            # NOTE(review): returning before the yield makes @contextmanager
            # raise RuntimeError at __enter__; preserved from the original.
            return
        shard = self.shard_node(node_id)
        # BUG FIX: initialise `con` so the finally clause cannot raise
        # NameError (masking the real error) when from_url itself fails.
        con = None
        try:
            pool = self.pool_map.get(self.shard_map.get(shard))
            con = StrictRedis.from_url(pool.url, connection_pool=pool)
            yield con
        except Exception:
            self.log.exception("Something blew up in the Redis context "
                               "manager")
            raise
        finally:
            if con is not None:
                del con

    @contextmanager
    def pipeline(self, node_id):
        """Context manager yielding a pipeline bound to node_id's shard;
        executes it on clean exit, resets it (and re-raises) on error.
        """
        if not self.__ready:
            return
        shard = self.shard_node(node_id)
        # BUG FIX: initialise `con` so the finally clause cannot raise
        # NameError (masking the real error) when from_url itself fails.
        con = None
        try:
            pool = self.pool_map.get(self.shard_map.get(shard))
            con = StrictRedis.from_url(pool.url, connection_pool=pool)
            with con.pipeline() as pipe:
                try:
                    yield pipe
                    pipe.execute()
                except:
                    self.log.exception("Something blew up inside a pipeline "
                                       "context")
                    pipe.reset()
                    raise
        except Exception:
            self.log.exception("Something blew up in the Redis context "
                               "manager")
            raise
        finally:
            if con is not None:
                del con
| {
"content_hash": "083ac74efb6c81389ba9182b57987ce3",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 79,
"avg_line_length": 37.31428571428572,
"alnum_prop": 0.5545036892663233,
"repo_name": "nficano/jotonce.com",
"id": "203b3ad52a9281ca9160e4c9f2797a54f8da13b9",
"size": "14391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jotonce/lib/redis_con.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34197"
},
{
"name": "Shell",
"bytes": "2909"
}
],
"symlink_target": ""
} |
from wtforms import *
from wtforms.validators import *
import wtforms.fields
import wtforms.widgets
from util import MultiValueDict
class BaseForm(Form):
    """wtforms Form that can populate itself from a Tornado request handler."""

    def __init__(self, handler=None, obj=None, prefix='', formdata=None, **kwargs):
        if handler:
            # Rebuild the form data from the handler's request arguments,
            # keeping repeated values for multi-valued fields.
            formdata = MultiValueDict()
            for name in handler.request.arguments:
                formdata.setlist(name, handler.get_arguments(name))
        Form.__init__(self, formdata, obj=obj, prefix=prefix, **kwargs)
class TagListField(wtforms.fields.Field):
    """Comma-separated tag list rendered and edited as a plain text input."""
    widget = wtforms.widgets.TextInput()

    def _value(self):
        # Display form: tags joined by ', ', or the empty string.
        if not self.data:
            return u''
        return u', '.join(self.data)

    def process_formdata(self, valuelist):
        if not valuelist:
            self.data = []
            return
        # Normalise: split on commas, trim, lowercase, deduplicate, sort.
        tags = set(tag.strip().lower() for tag in valuelist[0].strip().split(','))
        self.data = sorted(tags)
class HelloForm(BaseForm):
    # Single required text field; note the rendered label is 'name' even
    # though the attribute is 'planet'.
    planet = TextField('name', validators=[Required()])
class BookmarkForm(BaseForm):
    # Full bookmark editor: title and url are mandatory, the rest optional.
    title = TextField('Title', [Required()])
    url = TextField('Url', [Required()])
    description = TextAreaField('Description')
    tags = TagListField('Tags')
class BookmarkletForm(BaseForm):
    # Same fields as BookmarkForm, but only the url is required here.
    title = TextField('Title')
    url = TextField('Url', [Required()])
    description = TextAreaField('Description')
    tags = TagListField('Tags')
| {
"content_hash": "ad6d737616e94a7057e8bceb565a57c8",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 87,
"avg_line_length": 26.916666666666668,
"alnum_prop": 0.6795665634674922,
"repo_name": "haldun/bookmarks",
"id": "5748e23a642fdfc12f8ba33ba1f758f604d362fb",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "232"
},
{
"name": "Python",
"bytes": "20623"
}
],
"symlink_target": ""
} |
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
USER_AGENT = '%s GData-Python/1.3.2'
class Error(Exception):
    """Base class for exceptions raised by this module."""
    pass
class UnparsableUrlObject(Error):
    # NOTE(review): raised elsewhere in the package; the name indicates a
    # URL object that could not be parsed.
    pass
class ContentLengthRequired(Error):
    # NOTE(review): raised elsewhere in the package; the name indicates a
    # request that is missing a required Content-Length.
    pass
class HttpResponse(object):
    """Represents the server's response to an HTTP request.

    Designed to mirror the interface of httplib.HTTPResponse, which is what
    HttpClient.request returns, so higher level code can treat both alike.
    """

    def __init__(self, body=None, status=None, reason=None, headers=None):
        """Constructor for an HttpResponse object.

        Args:
          body: A file like object with a read() method, or a string; a
              string is wrapped so that read() returns it in full.
          status: The HTTP status code, converted to an int. Example: 200.
          reason: The HTTP status message following the code. Example: OK.
          headers: Dict of the HTTP headers in the server's response, e.g.
              Content-Length.
        """
        self._body = None
        if body:
            if hasattr(body, 'read'):
                self._body = body
            else:
                # Wrap plain strings so read() behaves like a file.
                self._body = StringIO.StringIO(body)
        self.status = int(status) if status is not None else None
        self.reason = reason
        self._headers = headers or {}

    def getheader(self, name, default=None):
        """Return the named response header, or `default` when absent."""
        if name not in self._headers:
            return default
        return self._headers[name]

    def read(self, amt=None):
        """Read up to `amt` bytes of the body; all of it when amt is falsy."""
        if amt:
            return self._body.read(amt)
        return self._body.read()
class GenericHttpClient(object):
    """Delegates HTTP requests to a wrapped client object, merging in a set
    of persistent headers (e.g. 'User-Agent') on every request.
    """
    debug = False

    def __init__(self, http_client, headers=None):
        """
        Args:
          http_client: An object with a request(operation, url, data,
              headers) method; every call here is delegated to it.
          headers: Dict of HTTP headers included in every request.
        """
        self.http_client = http_client
        self.headers = headers or {}

    def request(self, operation, url, data=None, headers=None):
        # Per-call headers override the persistent ones on key collision.
        merged = dict(self.headers)
        merged.update(headers or {})
        return self.http_client.request(operation, url, data=data,
                                        headers=merged)

    def get(self, url, headers=None):
        return self.request('GET', url, headers=headers)

    def post(self, url, data, headers=None):
        return self.request('POST', url, data=data, headers=headers)

    def put(self, url, data, headers=None):
        return self.request('PUT', url, data=data, headers=headers)

    def delete(self, url, headers=None):
        return self.request('DELETE', url, headers=headers)
class GenericToken(object):
    """Represents an Authorization token to be added to HTTP requests.

    Some Authorization headers include calculated fields (digital
    signatures, for example) derived from the parameters of the HTTP
    request, so the token object is responsible for signing the request
    and attaching the Authorization header. This generic base adds
    nothing at all.
    """

    def perform_request(self, http_client, operation, url, data=None,
                        headers=None):
        """Issue the request unchanged; no Authorization token is set."""
        return http_client.request(operation, url, data=data, headers=headers)

    def valid_for_scope(self, url):
        """Report whether this token authorizes access to *url*.

        The generic token never adds an auth header, so it is valid for
        no scope at all.
        """
        return False
| {
"content_hash": "531713e4938e6c93549319cdcfa965a0",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 77,
"avg_line_length": 32.267605633802816,
"alnum_prop": 0.689873417721519,
"repo_name": "edisonlz/fruit",
"id": "1508c38f80fe39fb954df70a02d8ee995f4671fa",
"size": "5183",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web_project/base/site-packages/atom/http_interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1482"
},
{
"name": "Batchfile",
"bytes": "6714"
},
{
"name": "C",
"bytes": "3085"
},
{
"name": "C++",
"bytes": "4823"
},
{
"name": "CSS",
"bytes": "660927"
},
{
"name": "DIGITAL Command Language",
"bytes": "27853"
},
{
"name": "GAP",
"bytes": "6045"
},
{
"name": "Go",
"bytes": "13616"
},
{
"name": "Groff",
"bytes": "7199"
},
{
"name": "HTML",
"bytes": "7678961"
},
{
"name": "Java",
"bytes": "208173"
},
{
"name": "JavaScript",
"bytes": "2626051"
},
{
"name": "Makefile",
"bytes": "16810"
},
{
"name": "Nginx",
"bytes": "19215"
},
{
"name": "PHP",
"bytes": "205978"
},
{
"name": "Perl",
"bytes": "27627"
},
{
"name": "Python",
"bytes": "15609476"
},
{
"name": "Shell",
"bytes": "13663"
},
{
"name": "TeX",
"bytes": "60714"
}
],
"symlink_target": ""
} |
# Bind the time module under a private name. This module is itself named
# time.py (jevent/time.py), so the alias presumably avoids confusion with
# a plain "import time" -- NOTE(review): confirm this actually resolves
# to the *stdlib* module under Python 2 relative-import rules.
__time__ = __import__('time')
from jevent import ioloop

# Granularity (seconds) of the polling sleep used by timerloop.wait.
IDLE_SLEEP = 0.01
class timerloop(object):
    """Cooperative timer bound to a jevent core loop.

    wait() repeatedly yields control to the core loop instead of
    blocking the whole thread for the full duration.
    """

    def __init__(self, coreloop):
        self.coreloop = coreloop

    def wait(self, secs):
        """Wait roughly *secs* seconds, yielding to the core loop.

        Registers as a listener, then alternates between switching into
        the core loop and (when the switch reports idleness) sleeping a
        short interval, until the requested time has elapsed.
        """
        if secs <= 0:
            # Nothing to wait for.
            # TODO: do we want to register, switch, and let it switch back?
            return
        nap = min(IDLE_SLEEP, secs)
        self.coreloop.register_listener()
        remaining = secs
        while remaining > 0:
            started = __time__.time()
            if self.coreloop.switch():
                __time__.sleep(nap)
            remaining -= __time__.time() - started
        self.coreloop.unregister_listener()
# One timerloop per OS thread, created lazily.
_timerloops = {}


def timer():
    """Return the calling thread's timerloop, creating it on first use."""
    import threading
    current = threading.current_thread()
    try:
        return _timerloops[current]
    except KeyError:
        loop = timerloop(ioloop.coreloop())
        _timerloops[current] = loop
        return loop
def sleep(secs):
    """Pause the caller for secs seconds while cooperating with the
    jevent core loop (delegates to this thread's timerloop)."""
    timer().wait(secs)
| {
"content_hash": "2afbcc943279243d4981a8ffb4eaa0c7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 25.61764705882353,
"alnum_prop": 0.5602755453501722,
"repo_name": "reversefold/jevent",
"id": "a65f2ca9fd2fc58dc111841f7d06b9fd039a1794",
"size": "871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jevent/time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28185"
}
],
"symlink_target": ""
} |
from bottle import template, redirect
import utils
def GET(**params):
    """Serve GET requests: render the materials index template.

    Query parameters are accepted but not used.
    """
    return template('materiales_index.html')
| {
"content_hash": "2f6292b6082b87ab638af1393a8521b2",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 23.2,
"alnum_prop": 0.7586206896551724,
"repo_name": "andres-hurtado-lopez/naranjaverdeprod",
"id": "a75da42c30a81b005fdbf6018079ab1942a4ff1f",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/produccion/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "214"
},
{
"name": "CSS",
"bytes": "313"
},
{
"name": "HTML",
"bytes": "179287"
},
{
"name": "JavaScript",
"bytes": "120099"
},
{
"name": "Python",
"bytes": "162953"
},
{
"name": "Shell",
"bytes": "1767"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the TagVariant model.

    TagVariant stores an alternate spelling (``variant``) pointing at a
    canonical ``taggit.Tag`` via a cascading foreign key.
    """

    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        ('jobs', '0008_job_tags'),
    ]

    operations = [
        migrations.CreateModel(
            name='TagVariant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('variant', models.CharField(max_length=255)),
                # Deleting a Tag also deletes its variants (CASCADE).
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='taggit.Tag')),
            ],
        ),
    ]
| {
"content_hash": "f6b2d59ce7e277f7bccb537206065b3a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 29.82608695652174,
"alnum_prop": 0.5889212827988338,
"repo_name": "ScorpionResponse/freelancefinder",
"id": "19efb9455ff9b906f2c97f67644f27c614d74841",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freelancefinder/jobs/migrations/0009_tagvariant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78142"
},
{
"name": "HTML",
"bytes": "64094"
},
{
"name": "JavaScript",
"bytes": "15641"
},
{
"name": "Makefile",
"bytes": "2402"
},
{
"name": "Python",
"bytes": "366512"
}
],
"symlink_target": ""
} |
from numpy import *
from numpy import f2py  # not part of import *
from scitools.StringFunction import StringFunction
import time, sys, os
import math  # stdlib math; used by timing2's myfunc2/expression2 (math.sin)
# make sys.path so we can find Grid2D.py:
sys.path.insert(0, os.path.join(os.environ['scripting'],
'src','py','examples'))
from Grid2D import Grid2D
try:
import ext_gridloop
except ImportError:
print 'You must first build the ext_gridloop module'
sys.exit(1)
class Grid2Deff(Grid2D):
def ext_gridloop1(self, f):
    """Compute a[i,j] = f(xi,yj) in an external routine."""
    # a is made here, sent to the routine, and then returned
    a = zeros((self.xcoor.size, self.ycoor.size))
    # C/C++ or Fortran module?
    if ext_gridloop.__doc__ is not None:
        if 'f2py' in ext_gridloop.__doc__:
            # Fortran extension module: use Fortran (column-major)
            # storage so the wrapper can fill a without copying
            a = asarray(a, order='Fortran')
            # could store a as self.a to avoid making Fortran
            # arrays in repeated calls
    ext_gridloop.gridloop1(a, self.xcoor, self.ycoor, f)
    return a
def ext_gridloop2(self, f):
    """Compute a[i,j] = f(xi,yj) in an external routine."""
    # a is made in the external routine (allocated by the wrapper)
    a = ext_gridloop.gridloop2(self.xcoor, self.ycoor, f)
    return a
def ext_gridloop_exceptions(self, f):
    """Test error handling in the extension module."""
    try: #1: a is a tuple, not an array
        ext_gridloop.gridloop1((1,2), self.xcoor, self.ycoor[1:], f)
    except:
        print sys.exc_type, sys.exc_value
    try: #2: xcoor passed where a is expected; ycoor has wrong length
        ext_gridloop.gridloop1(self.xcoor, self.xcoor, self.ycoor[1:], f)
    except:
        print sys.exc_type, sys.exc_value
    try: #3: 'abc' is not a callable callback
        ext_gridloop.gridloop2(self.xcoor, self.ycoor, 'abc')
    except:
        print sys.exc_type, sys.exc_value
    try: #4: wrong element type (complex, not float)
        ext_gridloop.gridloop2(array(self.xcoor,Complex64),
                               self.ycoor, 'abc')
    except:
        print sys.exc_type, sys.exc_value
    try: #5: wrong rank (2D array where 1D xcoor is expected)
        ext_gridloop.gridloop2(array([[0,0],[1,2]]), self.ycoor, 'abc')
    except:
        print sys.exc_type, sys.exc_value
# NOTE: the three next functions are only available in the
# Fortran 77 extension module:
def ext_gridloop_vec1(self, f):
    """As ext_gridloop2, but vectorized callback."""
    # f is called once with whole arrays (see myfuncf1), not per point
    a = zeros((self.xcoor.size, self.ycoor.size))
    a = ext_gridloop.gridloop_vec1(a, self.xcoor, self.ycoor, f)
    return a
def ext_gridloop_vec2(self, f):
    """As ext_gridloop_vec1, but callback to func. w/grid arg."""
    a = zeros((self.xcoor.size, self.ycoor.size))
    # f receives (a, self) -- the extra grid argument comes from
    # func1_extra_args (see myfuncf2) -- and must fill a in place
    a = ext_gridloop.gridloop_vec2(a, f, func1_extra_args=(self,))
    return a
def myfuncf3(self, a):
    """Class-method callback variant: fill a with myfunc over the grid."""
    a[:,:] = myfunc(self.xcoorv, self.ycoorv)  # in-place mod.
def ext_gridloop_vec3(self, f):
    """As ext_gridloop_vec2, but callback to class method."""
    # f is a bound method (e.g. g.myfuncf3); needs f2py > 2.42
    # (see the note in verify2)
    a = zeros((self.xcoor.size, self.ycoor.size))
    a = ext_gridloop.gridloop_vec2(a, f)
    return a
def ext_gridloop2_str(self, f77_name):
    """
    Call an interface to ext_gridloop.gridloop2, which avoids
    callbacks to Python and calls the f77_name F77 function
    instead.
    """
    # f77_name is the *name* (a string) of a compiled F77 function,
    # resolved on the Fortran side
    a = ext_gridloop.gridloop2_str(self.xcoor, self.ycoor,
                                   f77_name)
    return a
def ext_gridloop_noalloc(self, f77_name, a):
    """
    As ext_gridloop2_str, but a is intent(in,out), i.e., there is
    no allocation of a in the wrapper code. If the function
    is called a large number of times (as in our efficiency
    tests), intent(in,out) increases the performance.
    """
    # caller supplies a; presumably it must already have Fortran
    # storage (timing2 obtains it from ext_gridloop2) -- confirm
    a = ext_gridloop.gridloop_noalloc(a, self.xcoor, self.ycoor,
                                      f77_name)
    return a
def ext_gridloop2_fcb(self):
    """As ext_gridloop2, but compiled Fortran callback func."""
    import callback  # module generated by ext_gridloop2_fcb_compile
    a = callback.gridloop2_fcb(self.xcoor, self.ycoor)
    return a
def ext_gridloop2_fcb_compile(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s', type(fstr)
# generate Fortran source
source = """
real*8 function fcb(x, y)
real*8 x, y
fcb = %s
return
end
subroutine gridloop2_fcb(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py intent(in) xcoor
Cf2py intent(in) ycoor
Cf2py depend(nx,ny) a
real*8 fcb
external fcb
call gridloop2(a, xcoor, ycoor, nx, ny, fcb)
return
end
""" % fstr
# compile callback code and link with ext_gridloop.so:
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1 "\
" ./ext_gridloop.so"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_fcb_ptr(self):
    """As ext_gridloop2, but compiled Fortran callback func."""
    from callback import fcb
    # fcb._cpointer presumably exposes the address of the compiled
    # routine so gridloop2 can call it without Python overhead -- confirm
    a = ext_gridloop.gridloop2(self.xcoor, self.ycoor,
                               fcb._cpointer)
    return a
def ext_gridloop2_fcb_ptr_compile(self, fstr):
if not isinstance(fstr, StringFunction):
raise TypeError, \
'fstr must be StringFunction, not %s', type(fstr)
source = fstr.F77_code('fcb')
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_compile(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not', type(fstr)
# generate Fortran source for gridloop2:
source = """
subroutine gridloop2(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py depend(nx,ny) a
integer i,j
real*8 x, y
do j = 1,ny
y = ycoor(j)
do i = 1,nx
x = xcoor(i)
a(i,j) = %s
end do
end do
return
end
""" % fstr
f2py_args = "--fcompiler=Gnu --build-dir tmp1"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1"
r = f2py.compile(source, modulename='ext_gridloop2',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import ext_gridloop2 # see if we can import successfully
def ext_gridloop2_v2(self):
    """
    As ext_gridloop2, but the Fortran gridloop2 function was
    generated and compiled in Python (in ext_gridloop2_compile).
    """
    import ext_gridloop2
    # the grid function is hardwired into the compiled code,
    # so no callback argument is needed
    return ext_gridloop2.gridloop2(self.xcoor, self.ycoor)
def ext_gridloop2_weave(self, fstr):
"""Migrate loop to C++ with aid of Weave."""
try:
from scipy import weave
except ImportError:
print 'Could not import weave.\nContinue...'
return
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not', type(fstr)
# the callback function is now coded in C++
# (fstr must be valid C++ code):
extra_code = r"""
double cppcb(double x, double y) {
return %s;
}
""" % fstr
# the loop in C++ (with Blitz++ array syntax):
code = r"""
int i,j;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
a(i,j) = cppcb(xcoor(i), ycoor(j));
}
}
"""
nx = self.nx; ny = self.ny
a = zeros((nx, ny))
xcoor = self.xcoor; ycoor = self.ycoor
err = weave.inline(code, ['a', 'nx', 'ny', 'xcoor', 'ycoor'],
type_converters=weave.converters.blitz,
support_code=extra_code, compiler='gcc')
# a is filled
return a
def ext_gridloop1_instant(self, fstr):
    """Generate and compile a C gridloop1 with fstr inlined (Instant).

    On success self.gridloop1_instant holds the compiled function;
    on any failure it is set to None.

    Args:
      fstr: string holding a C expression in x and y.

    Raises:
      TypeError: if fstr is not a string.
    """
    if not isinstance(fstr, str):
        # Bug fix: `raise TypeError, msg, type(fstr)` treated type(fstr)
        # as a traceback object; format it into the message instead
        # (the original message also lacked the %s placeholder).
        raise TypeError(
            'fstr must be string expression, not %s' % type(fstr))
    # generate C source for gridloop1:
    # (no call to C function f(x,y), fstr is inserted in the loop)
    # Bug fix in the template below: y must track the inner index j
    # (it read ycoor[i], which froze y at the row value; compare the
    # Fortran template's y = ycoor(j) in ext_gridloop2_compile).
    source = """
void gridloop1(double *a, int nx, int ny,
double *xcoor, double *ycoor)
{
# define index(a, i, j) a[i*ny + j]
int i, j; double x, y;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
x = xcoor[i]; y = ycoor[j];
index(a, i, j) = %s
}
}
}
""" % fstr
    try:
        from instant import inline_with_numpy
        a = zeros((self.nx, self.ny))
        arrays = [['nx', 'ny', 'a'],
                  ['nx', 'xcoor'],
                  ['ny', 'ycoor']]
        self.gridloop1_instant = \
            inline_with_numpy(source, arrays=arrays)
    except:
        self.gridloop1_instant = None
def dump(self, a):
    """Nice printout of a 2D array a."""
    # show each entry with both its grid coordinates and its indices
    for i in xrange(a.shape[0]):
        for j in xrange(a.shape[1]):
            print 'value at (%g,%g) \t = a[%d,%d] = %g' % \
                  (self.xcoor[i], self.ycoor[j], i, j, a[i,j])
def gridloop_psyco_init(self, method):
    """Try to accelerate Grid2D.gridloop with psyco."""
    # define method self.gridloop_psyco:
    try:
        import psyco
        self.gridloop_psyco = psyco.proxy(method)
    except ImportError:
        # psyco unavailable: fall back to the plain method so callers
        # can use self.gridloop_psyco unconditionally
        self.gridloop_psyco = method
def f1(x,y):
    """Sample grid function x + 2*y; prints each evaluation (debug aid)."""
    print 'x+2*y =',x+2*y
    return x+2*y
def verify1():
    """Basic test of the extension module.

    Each variant's result is compared against the pure-NumPy
    computation g(f1).
    """
    g = Grid2Deff(dx=0.5, dy=1)
    f_exact = g(f1)  # NumPy computation
    expression1 = StringFunction('x + 2*y',
                                 independent_variables=('x','y'),
                                 globals=globals())
    f = g.ext_gridloop1(f1)
    print 'f computed by external gridloop1 function and f1:\n', f
    if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
        print 'f is correct'
    f = g.ext_gridloop2(f1)
    print 'f computed by external gridloop2 function and f1:\n', f
    if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
        print 'f is correct'
    f = g.ext_gridloop1(expression1)
    print 'f computed by external gridloop1 function and StringFunction:\n', f
    if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
        print 'f is correct'
    f = g.ext_gridloop2(expression1)
    print 'f computed by external gridloop2 function and StringFunction:\n', f
    if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
        print 'f is correct'
    # calling __call__ directly skips StringFunction's dispatch overhead
    fast_func = expression1.__call__
    f = g.ext_gridloop2(fast_func)
    print 'f computed by external gridloop2 function and StringFunction.__call__:\n', f
    if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
        print 'f is correct'
    f = g(expression1)
    print 'f computed by __call__ and StringFunction:\n', f
    if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
        print 'f is correct'
    # check printing:
    print 'array seen from Python:'
    g.dump(f)
    if 'dump' in dir(ext_gridloop):
        print 'array seen from Fortran (transposed, but right values):'
        ext_gridloop.dump(f, g.xcoor, g.ycoor)
def myfunc(x, y):
    """Grid test function sin(x*y) + 8*x.

    Uses NumPy's sin, so it also works elementwise on whole coordinate
    arrays (see myfuncf1).
    """
    return 8 * x + sin(x * y)
def myfuncf1(a, xcoor, ycoor, nx, ny):
    """Vectorized function to be called from extension module."""
    #print 'myfuncf1; type of args:',type(a),type(xcoor),type(nx)
    # reshape to column/row vectors so myfunc broadcasts over the grid
    x = xcoor[:,newaxis]
    y = ycoor[newaxis,:]
    a[:,:] = myfunc(x, y)  # in-place modification of a
    print 'myfuncf1, a=',a
def myfuncf2(a, g):
    """Vectorized function to be called from extension module."""
    #print 'myfuncf2; type of args:',type(a),type(g)
    # g supplies precomputed 2D coordinate arrays (xcoorv, ycoorv)
    a[:,:] = myfunc(g.xcoorv, g.ycoorv)  # in-place modification of a
def verify2(n=3):
    """
    Test of some methods in class Grid2Deff that call up
    some F77 routines for improving the efficiency of callbacks
    to Python.
    """
    if not 'gridloop_vec2' in dir(ext_gridloop):
        raise ImportError, 'verify2 works only for F77 module'
    dx = 1.0/n
    g = Grid2Deff(dx=dx, dy=dx)
    from StringIO import StringIO
    from scitools.numpyutils import arr
    # reference values for the default grid (n=3)
    a_exact = arr(file_=StringIO("""
0. 0. 0. 0.
2.66666667 2.7775493 2.88706441 2.99386136
5.33333333 5.55373108 5.7632897 5.95170314
8. 8.3271947 8.6183698 8.84147098"""))
    def _check():
        # compares the enclosing-scope a (reassigned before each call)
        # against a_exact
        if not allclose(a, a_exact):
            print 'ERROR, a is wrong, correct a reads\n', a_exact
        else:
            print 'correct array'
    a = g.ext_gridloop_vec1(myfuncf1)
    print "g.ext_gridloop_vec1(myfuncf1): a=\n",a
    _check()
    a = g.ext_gridloop_vec2(myfuncf2)
    print "g.ext_gridloop_vec2(myfuncf2): a=\n",a
    _check()
    # need f2py version > 2.42 (callback to class method):
    a = g.ext_gridloop_vec3(g.myfuncf3)
    print "g.ext_gridloop_vec3(g.myfuncf3): a=\n",a
    _check()
    a = g.ext_gridloop2_str('myfunc')
    print "g.ext_gridloop_str('myfunc'): a=\n",a
    _check()
    a = g.ext_gridloop_noalloc('myfunc', a)
    print "g.ext_gridloop_str_noalloc('myfunc'): a=\n",a
    _check()
    fstr = 'sin(x*y) + 8*x'
    g.ext_gridloop2_fcb_compile(fstr)
    a = g.ext_gridloop2_fcb()
    print "g.gridloop2_fcb: a=\n",a
    _check()
    import callback
    print 'contents of callback module:', dir(callback)
    fstr = StringFunction('sin(x*y) + 8*x')
    g.ext_gridloop2_fcb_ptr_compile(fstr)
    a = g.ext_gridloop2_fcb_ptr()
    print "g.gridloop2_fcb_ptr: a=\n",a
    _check()
    import callback
    print 'fcb callback module:', dir(callback), dir(callback.fcb)
    # NOTE(review): fstr is a StringFunction from here on, but
    # ext_gridloop2_compile, ext_gridloop2_weave and
    # ext_gridloop1_instant all require a plain string (their
    # isinstance checks would raise TypeError) -- presumably str(fstr)
    # was intended; confirm.
    g.ext_gridloop2_compile(fstr)
    a = g.ext_gridloop2_v2()
    print "g.gridloop2_v2: a=\n",a
    _check()
    a = g.ext_gridloop2_weave(fstr)
    print "g.gridloop2_weave: a=\n",a
    _check()
    g.gridloop_psyco_init(g.gridloop)
    a = g.gridloop_psyco(fstr)
    print "g.gridloop_psyco(str): a=\n",a
    _check()
    a = g.gridloop_psyco(myfunc)
    print "g.gridloop_psyco(func): a=\n",a
    _check()
    g.ext_gridloop1_instant(fstr)
    g.gridloop1_instant(a, g.nx, g.ny, g.xcoor, g.ycoor)
    print "g.gridloop1_instant: a=\n", a
def timing2(n=2000, best_time=1.0):
"""Time different implementations of the extension module."""
print 'Grid2Deff.timing2: reference CPU time = %g' % best_time
dx = 1.0/n
g = Grid2Deff(dx=dx, dy=dx)
# here we use straight NumPy sin in a scalar context:
def myfunc1(x, y):
return sin(x*y) + 8*x
def myfunc2(x, y):
return math.sin(x*y) + 8*x
expression1 = StringFunction('sin(x*y) + 8*x',
independent_variables=('x','y'),
globals=globals())
expression1_f = expression1.__call__ # for efficiency and F77 callback
expression2 = StringFunction('math.sin(x*y) + 8*x',
independent_variables=('x','y'),
globals=globals())
expression2_f = expression2.__call__ # for efficiency and F77 callback
from scitools.misc import timer
from scitools.EfficiencyTable import EfficiencyTable
e = EfficiencyTable('Grid2Deff tests, %dx%d grid' % (n,n), best_time)
t0a = timer(g.gridloop, (myfunc1,), repetitions=1)
e.add('g.gridloop, myfunc1', t0a)
t0b = timer(g.gridloop, (myfunc2,), repetitions=1)
e.add('g.gridloop, myfunc2', t0b)
t0c = timer(g.__call__, (myfunc1,), repetitions=1)
e.add('g.__call__, myfunc1', t0c)
t0d = timer(g.__call__, (expression1_f,), repetitions=1)
e.add('g.__call__, expression1_f', t0d)
t0e = timer(g.gridloop_itemset, (myfunc2,), repetitions=1)
e.add('g.gridloop_itemset, myfunc2', t0e)
t1a = timer(g.ext_gridloop1, (myfunc1,), repetitions=1)
e.add('g.ext_gridloop1, myfunc1', t1a)
t1b = timer(g.ext_gridloop1, (myfunc2,), repetitions=1)
e.add('g.ext_gridloop1, myfunc2', t1b)
t2a = timer(g.ext_gridloop2, (myfunc1,), repetitions=1)
e.add('g.ext_gridloop2, myfunc1', t2a)
t2b = timer(g.ext_gridloop2, (myfunc2,), repetitions=1)
e.add('g.ext_gridloop2, myfunc2', t2b)
t3a = timer(g.ext_gridloop2, (expression1_f,), repetitions=1)
e.add('g.ext_gridloop2, expression1_f', t3a)
t3b = timer(g.ext_gridloop2, (expression2_f,), repetitions=1)
e.add('g.ext_gridloop2, expression2_f', t3b)
nrep = 20
# try the improved functions (works only for the F77 module):
if 'gridloop_vec2' in dir(ext_gridloop):
t4 = timer(g.ext_gridloop_vec2, (myfuncf2,), repetitions=nrep)
e.add('g.ext_gridloop_vec2, myfuncf2', t4)
if 'gridloop2_str' in dir(ext_gridloop):
t5 = timer(g.ext_gridloop2_str, ('myfunc',), repetitions=nrep)
e.add('g.ext_gridloop2_str, myfunc', t5)
# try the version without allocation (first, make an a array):
a = g.ext_gridloop2(myfunc1) # a has now Fortran storage
t5b = timer(g.ext_gridloop_noalloc,
('myfunc', a), repetitions=nrep)
e.add('g.ext_gridloop_noalloc, myfunc', t5b)
# try 'inline' F77 compiled callback too:
# (give F77 source for core of callback function as argument)
g.ext_gridloop2_fcb_compile(str(expression1))
t6 = timer(g.ext_gridloop2_fcb, (), repetitions=nrep)
e.add('g.ext_gridloop2_fcb(%s)' % repr(str(expression1)), t6)
g.ext_gridloop2_fcb_ptr_compile(expression1)
t6b = timer(g.ext_gridloop2_fcb_ptr, (), repetitions=nrep)
e.add('g.ext_gridloop2_fcb_ptr(%s)' % repr(expression1), t6b)
g.ext_gridloop2_compile(str(expression1))
t7 = timer(g.ext_gridloop2_v2, (), repetitions=nrep)
e.add('g.ext_gridloop2_v2(%s)' % repr(str(expression1)), t7)
# weave version:
t8 = timer(g.ext_gridloop2_weave, (str(expression1),), repetitions=nrep)
e.add('g.ext_gridloop2_weave(%s)' % repr(str(expression1)), t8)
# psyco:
g.gridloop_psyco_init(g.gridloop)
if g.gridloop_psyco != g.gridloop: # has psyco
t9a = timer(g.gridloop_psyco, (myfunc2,), repetitions=1)
e.add('g.gridloop_psyco, myfunc2', t9a)
t9b = timer(g.gridloop_psyco, (expression2_f,), repetitions=1)
e.add('g.gridloop_psyco, expression2_f', t9b)
g.gridloop_psyco_init(g.gridloop_itemset)
if g.gridloop_psyco != g.gridloop_itemset: # has psyco
t9a = timer(g.gridloop_psyco, (myfunc2,), repetitions=1)
e.add('g.gridloop_psyco (itemset), myfunc2', t9a)
t9b = timer(g.gridloop_psyco, (expression2_f,), repetitions=1)
e.add('g.gridloop_psyco (itemset), expression2_f', t9b)
# instant:
g.ext_gridloop1_instant(str(expression1))
if g.gridloop1_instant is not None:
a = zeros((self.nx, self.ny))
t10 = timer(g.gridloop1_instant,
(a, self.nx, g.ny, g.xcoor, g.ycoor),
repetitions=nrep)
e.add('g.gridloop1_instant', t10)
print '\n\n\n\nrun from directory', os.getcwd()
print e
#print 'Experiments in table:', e.experiments
def exceptions1():
    """Exercise the error handling of the extension module."""
    grid = Grid2Deff(dx=0.5, dy=1)

    def integrand(x, y):
        return sin(x * y) + 8 * x

    grid.ext_gridloop_exceptions(integrand)
def run():
    """Dispatch to the function named on the command line.

    Usage: Grid2Deff.py [funcname [n [best_time]]]
    """
    # provide function to call (verify1, timing2, exceptions1, etc.)
    # as command-line argument
    try:
        func = sys.argv[1]
    except:
        # basic test if no command-line argument
        func = 'verify1'
    if func == 'timing2':
        # in case of timing, specify grid size as 2nd argument:
        try:
            n = int(sys.argv[2])
        except:
            n = 1100
        # specify reference executing time as 3rd argument:
        try:
            best_time = float(sys.argv[3])
        except:
            best_time = 1.0
        # NOTE(review): exec of a CLI-supplied name runs arbitrary
        # code; acceptable for a demo script, not for production.
        exec 'timing2(%d, %g)' % (n, best_time)
    else:
        exec func + '()'
if __name__ == '__main__':
    # lots of experiments; example invocation:
    # Grid2Deff.py timing2 1100 0.13
    # 1100 is grid size, 0.13 is reference time
    run()
| {
"content_hash": "7f3d65e24bf707b74f0e5a3aba0526f9",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 87,
"avg_line_length": 34.59136212624585,
"alnum_prop": 0.5740011525163273,
"repo_name": "sniemi/SamPy",
"id": "b36dd66b1c0553e4b8c4eb8395be1243fb15de83",
"size": "20847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/src1/TCSE3-3rd-examples/src/py/mixed/Grid2D/Grid2Deff.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
import scattertext as st

# Rotten Tomatoes sample corpus; map the raw category labels to
# presentation names.
movie_df = st.SampleCorpora.RottenTomatoes.get_data()
movie_df.category = movie_df.category\
    .apply(lambda x: {'rotten': 'Negative', 'fresh': 'Positive', 'plot': 'Plot'}[x])

# Build a unigram corpus keyed on the mapped category column.
corpus = st.CorpusFromPandas(
    movie_df,
    category_col='category',
    text_col='text',
    nlp=st.whitespace_nlp_with_sentences
).build().get_unigram_corpus()

# Score Positive vs. Negative, treating Plot as the neutral category.
term_scorer = (st.ZScores(corpus).set_categories('Positive', ['Negative'], ['Plot']))

html = st.produce_frequency_explorer(
    corpus,
    category='Positive',
    not_categories=['Negative'],
    neutral_categories=['Plot'],
    term_scorer=term_scorer,
    metadata=movie_df['movie_name'],
    grey_threshold=1.96,  # grey out terms below ~95% significance
    show_neutral=True
)
file_name = 'demo_zscore.html'
# Bug fix: write through a context manager so the file handle is
# flushed and closed deterministically (the original relied on GC to
# close the handle returned by open()).
with open(file_name, 'wb') as out:
    out.write(html.encode('utf-8'))
print('./' + file_name)
| {
"content_hash": "336f5866ef35abd290d0ae244f177b5a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 26.96551724137931,
"alnum_prop": 0.7033248081841432,
"repo_name": "JasonKessler/scattertext",
"id": "9e92f18dd3f3259b291513de503d01cdefd5d3af",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_z_scores.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "52028"
},
{
"name": "JavaScript",
"bytes": "497904"
},
{
"name": "Python",
"bytes": "1183530"
},
{
"name": "Shell",
"bytes": "306"
}
],
"symlink_target": ""
} |
"""Implementation of config command for creating a gsutil configuration file."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import datetime
import json
import multiprocessing
import os
import signal
import socket
import stat
import sys
import textwrap
import time
import webbrowser
from six.moves import input
from six.moves.http_client import ResponseNotReady
import boto
from boto.provider import Provider
import gslib
from gslib.command import Command
from gslib.command import DEFAULT_TASK_ESTIMATION_THRESHOLD
from gslib.commands.compose import MAX_COMPOSE_ARITY
from gslib.cred_types import CredTypes
from gslib.exception import AbortException
from gslib.exception import CommandException
from gslib.metrics import CheckAndMaybePromptForAnalyticsEnabling
from gslib.sig_handling import RegisterSignalHandler
from gslib.utils import constants
from gslib.utils import system_util
from gslib.utils.hashing_helper import CHECK_HASH_ALWAYS
from gslib.utils.hashing_helper import CHECK_HASH_IF_FAST_ELSE_FAIL
from gslib.utils.hashing_helper import CHECK_HASH_IF_FAST_ELSE_SKIP
from gslib.utils.hashing_helper import CHECK_HASH_NEVER
from gslib.utils.parallelism_framework_util import ShouldProhibitMultiprocessing
from httplib2 import ServerNotFoundError
from oauth2client.client import HAS_CRYPTO
_SYNOPSIS = """
gsutil [-D] config [-a] [-b] [-e] [-f] [-n] [-o <file>] [-r] [-s <scope>] [-w]
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The ``gsutil config`` command applies to users who have installed gsutil as a
standalone tool. If you installed gsutil via the Cloud SDK, ``gsutil config``
fails unless you are specifically using the ``-a`` flag or have configured
gcloud to not pass its managed credentials to gsutil (via the command ``gcloud
config set pass_credentials_to_gsutil false``). For all other use cases, Cloud
SDK users should use the ``gcloud auth`` group of commands instead, which
configures OAuth2 credentials that gcloud implicitly passes to gsutil at
runtime. To check if you are using gsutil from the Cloud SDK or as a
stand-alone, use ``gsutil version -l`` and in the output look for "using cloud
sdk".
The ``gsutil config`` command obtains access credentials for Google Cloud
Storage and writes a `boto/gsutil configuration file
<https://cloud.google.com/storage/docs/boto-gsutil>`_ containing the obtained
credentials along with a number of other configuration-controllable values.
Unless specified otherwise (see OPTIONS), the configuration file is written
to ~/.boto (i.e., the file .boto under the user's home directory). If the
default file already exists, an attempt is made to rename the existing file
to ~/.boto.bak; if that attempt fails the command exits. A different
destination file can be specified with the ``-o`` option (see OPTIONS).
Because the boto configuration file contains your credentials you should
keep its file permissions set so no one but you has read access. (The file
is created read-only when you run ``gsutil config``.)
<B>CREDENTIALS</B>
By default ``gsutil config`` obtains OAuth2 credentials and writes them to the
[Credentials] section of the configuration file. Unless otherwise specified,
it requests a token allowing full control of resources in several services,
e.g. Cloud Storage, Cloud KMS (used for the 'kms' command), and Cloud Pub/Sub
(used for the 'notification' command). To request a token with more limited
scopes, you can specify additional options (see the OPTIONS section below for
the full list). Some examples include:
Create a token with read-only access for storage resources:
gsutil config -r
Create a token with read-write access for storage resources:
gsutil config -w
Create a token with full-control access for storage resources:
gsutil config -f
In addition, ``-s <scope>`` can be specified multiple times to request
additional scopes, where ``<scope>`` is specified using the full URL of the
desired scope as listed on
https://developers.google.com/identity/protocols/googlescopes.
If you want to use credentials based on access key and secret (the older
authentication method before OAuth2 was supported) instead of OAuth2,
see help about the ``-a`` option in the OPTIONS section.
If you wish to use gsutil with other providers (or to copy data back and
forth between multiple providers) you can edit their credentials into the
[Credentials] section after creating the initial boto configuration file.
<B>CONFIGURING SERVICE ACCOUNT CREDENTIALS</B>
Service accounts are useful for authenticating on behalf of a service or
application (as opposed to a user). If you use gsutil as a standalone tool,
you configure credentials for service accounts using the ``-e`` option:
gsutil config -e
Note that if you use gsutil through the Cloud SDK, you instead activate your
service account via the `gcloud auth activate-service-account
<https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account>`_
command.
When you run ``gsutil config -e``, you are prompted for the path to your
private key file and, if not using a JSON key file, your service account
email address and key file password. To get this data, follow the instructions
on `Service Accounts <https://cloud.google.com/storage/docs/authentication#generating-a-private-key>`_.
Using this information, gsutil populates the "gs_service_key_file" attribute
in the boto configuration file. If not using a JSON key file, gsutil also
populates the "gs_service_client_id" and "gs_service_key_file_password"
attributes.
Note that your service account is NOT considered an Owner for the purposes of
API access (see "gsutil help creds" for more information about this). See
https://developers.google.com/identity/protocols/OAuth2ServiceAccount for
further information on service account authentication.
<B>OPTIONS</B>
-a Prompt for Google Cloud Storage access key and secret (the older
authentication method before OAuth2 was supported) instead of
obtaining an OAuth2 token.
-b Causes ``gsutil config`` to launch a browser to obtain OAuth2
approval and the project ID instead of showing the URL for each
and asking the user to open the browser. This will probably not
work as expected if you are running gsutil from an ssh window, or
using gsutil on Windows.
-e Prompt for service account credentials. This option requires that
``-a`` is not set.
-f Request token with full control (devstorage.full_control scope).
Note that this does not provide non-storage scopes, such as those
needed to edit Pub/Sub and KMS resources (used with the
'notification' and 'kms' commands).
-n Write the configuration file without authentication configured.
This flag is mutually exclusive with all flags other than ``-o``.
-o <file> Write the configuration to <file> instead of ~/.boto.
Use ``-`` for stdout.
-r Request token with read-only access (devstorage.read_only scope).
--reauth Request token with reauth access (accounts.reauth scope).
-s <scope> Request a specific OAuth2 <scope> instead of the default(s). This
option may be repeated to request multiple scopes, and may be used
in conjunction with other flags that request a specific scope.
-w Request token with read-write access
(devstorage.read_write scope).
""")
# gcs_oauth2_boto_plugin is optional at import time; tolerate its
# absence so the module still loads.
# NOTE(review): code paths using oauth2_helper presumably guard on this
# import having succeeded -- confirm before relying on it.
try:
  from gcs_oauth2_boto_plugin import oauth2_helper  # pylint: disable=g-import-not-at-top
except ImportError:
  pass
# Cloud Console project-list URL; surfaced to users during interactive config.
GOOG_CLOUD_CONSOLE_URI = 'https://cloud.google.com/console#/project'
# Boilerplate written verbatim at the top of every generated boto config file.
CONFIG_PRELUDE_CONTENT = """
# This file contains credentials and other configuration information needed
# by the boto library, used by gsutil. You can edit this file (e.g., to add
# credentials) but be careful not to mis-edit any of the variable names (like
# "gs_access_key_id") or remove important markers (like the "[Credentials]" and
# "[Boto]" section delimiters).
#
"""
# Default number of OS processes and Python threads for parallel operations.
# On Linux systems we automatically scale the number of processes to match
# the underlying CPU/core count. Given we'll be running multiple concurrent
# processes on a typical multi-core Linux computer, to avoid being too
# aggressive with resources, the default number of threads is reduced from
# the previous value of 24 to 5.
#
# We also cap the maximum number of default processes at 32. Since each level
# of recursion depth gets its own process pool, this means a maximum of
# 64 processes with the current maximum recursion depth of 2. We limit this
# number because testing with more than 200 processes showed fatal locking
# exceptions in Python's multiprocessing.Manager. More processes are
# probably not needed to saturate most networks.
#
# On Windows and Alpine Linux, Python multi-processing presents various
# challenges so we retain compatibility with the established parallel mode
# operation, i.e. one process and 24 threads.
# NOTE(review): ShouldProhibitMultiprocessing() is defined elsewhere in this
# file; per the comment above it presumably returns True on Windows/Alpine --
# confirm against its definition.
should_prohibit_multiprocessing, unused_os = ShouldProhibitMultiprocessing()
if should_prohibit_multiprocessing:
  DEFAULT_PARALLEL_PROCESS_COUNT = 1
  DEFAULT_PARALLEL_THREAD_COUNT = 24
else:
  DEFAULT_PARALLEL_PROCESS_COUNT = min(multiprocessing.cpu_count(), 32)
  DEFAULT_PARALLEL_THREAD_COUNT = 5
# TODO: Once compiled crcmod is being distributed by major Linux distributions
# revert DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD value to '150M'.
# A threshold of '0' disables parallel composite uploads by default.
DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD = '0'
DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE = '50M'
# Defaults for sliced (multi-slice parallel) object downloads.
DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD = '150M'
DEFAULT_SLICED_OBJECT_DOWNLOAD_COMPONENT_SIZE = '200M'
DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS = 4
# Compressed transport encoded uploads buffer chunks of compressed data. When
# running many uploads in parallel, compression may consume more memory than
# available. This restricts the number of compressed transport encoded uploads
# running in parallel such that they don't consume more memory than set here.
DEFAULT_MAX_UPLOAD_COMPRESSION_BUFFER_SIZE = '2G'
# gzip compression level. This is simply making the python stdlib default explicit.
DEFAULT_GZIP_COMPRESSION_LEVEL = 9
# Template for the [Boto] section of the generated config file. The text is
# written verbatim; every option line is commented out so library defaults
# apply unless the user edits the file, except https_validate_certificates,
# which is explicitly enabled for security.
CONFIG_BOTO_SECTION_CONTENT = """
[Boto]
# http_socket_timeout specifies the timeout (in seconds) used to tell httplib
# how long to wait for socket timeouts. The default is 70 seconds. Note that
# this timeout only applies to httplib, not to httplib2 (which is used for
# OAuth2 refresh/access token exchanges).
#http_socket_timeout = 70
# The following two options control the use of a secure transport for requests
# to S3 and Google Cloud Storage. It is highly recommended to set both options
# to True in production environments, especially when using OAuth2 bearer token
# authentication with Google Cloud Storage.
# Set 'https_validate_certificates' to False to disable server certificate
# checking. The default for this option in the boto library is currently
# 'False' (to avoid breaking apps that depend on invalid certificates); it is
# therefore strongly recommended to always set this option explicitly to True
# in configuration files, to protect against "man-in-the-middle" attacks.
https_validate_certificates = True
# 'debug' controls the level of debug messages printed for the XML API only:
# 0 for none, 1 for basic boto debug, 2 for all boto debug plus HTTP
# requests/responses.
#debug = <0, 1, or 2>
# 'num_retries' controls the number of retry attempts made when errors occur
# during data transfers. The default is 6.
# Note 1: You can cause gsutil to retry failures effectively infinitely by
# setting this value to a large number (like 10000). Doing that could be useful
# in cases where your network connection occasionally fails and is down for an
# extended period of time, because when it comes back up gsutil will continue
# retrying. However, in general we recommend not setting the value above 10,
# because otherwise gsutil could appear to "hang" due to excessive retries
# (since unless you run gsutil -D you won't see any logged evidence that gsutil
# is retrying).
# Note 2: Don't set this value to 0, as it will cause boto to fail when reusing
# HTTP connections.
#num_retries = <integer value>
# 'max_retry_delay' controls the max delay (in seconds) between retries. The
# default value is 60, so the backoff sequence will be 1 seconds, 2 seconds, 4,
# 8, 16, 32, and then 60 for all subsequent retries for a given HTTP request.
# Note: At present this value only impacts the XML API and the JSON API uses a
# fixed value of 60.
#max_retry_delay = <integer value>
"""
# Template for the [GoogleCompute] section (GCE service account credentials);
# normally only relevant inside /etc/boto.cfg on GCE VMs.
CONFIG_GOOGLECOMPUTE_SECTION_CONTENT = """
[GoogleCompute]
# 'service_account' specifies the a Google Compute Engine service account to
# use for credentials. This value is intended for use only on Google Compute
# Engine virtual machines and usually lives in /etc/boto.cfg. Most users
# shouldn't need to edit this part of the config.
#service_account = default
"""
CONFIG_INPUTLESS_GSUTIL_SECTION_CONTENT = """
[GSUtil]
# 'resumable_threshold' specifies the smallest file size [bytes] for which
# resumable Google Cloud Storage uploads are attempted. The default is 8388608
# (8 MiB).
#resumable_threshold = %(resumable_threshold)d
# 'rsync_buffer_lines' specifies the number of lines of bucket or directory
# listings saved in each temp file during sorting. (The complete set is
# split across temp files and separately sorted/merged, to avoid needing to
# fit everything in memory at once.) If you are trying to synchronize very
# large directories/buckets (e.g., containing millions or more objects),
# having too small a value here can cause gsutil to run out of open file
# handles. If that happens, you can try to increase the number of open file
# handles your system allows (e.g., see 'man ulimit' on Linux; see also
# http://docs.python.org/2/library/resource.html). If you can't do that (or
# if you're already at the upper limit), increasing rsync_buffer_lines will
# cause gsutil to use fewer file handles, but at the cost of more memory. With
# rsync_buffer_lines set to 32000 and assuming a typical URL is 100 bytes
# long, gsutil will require approximately 10 MiB of memory while building
# the synchronization state, and will require approximately 60 open file
# descriptors to build the synchronization state over all 1M source and 1M
# destination URLs. Memory and file descriptors are only consumed while
# building the state; once the state is built, it resides in two temp files that
# are read and processed incrementally during the actual copy/delete
# operations.
#rsync_buffer_lines = 32000
# 'state_dir' specifies the base location where files that
# need a static location are stored, such as pointers to credentials,
# resumable transfer tracker files, and the last software update check.
# By default these files are stored in ~/.gsutil
#state_dir = <file_path>
# gsutil periodically checks whether a new version of the gsutil software is
# available. 'software_update_check_period' specifies the number of days
# between such checks. The default is 30. Setting the value to 0 disables
# periodic software update checks.
#software_update_check_period = 30
# 'tab_completion_timeout' controls the timeout (in seconds) for tab
# completions that involve remote requests (such as bucket or object names).
# If tab completion does not succeed within this timeout, no tab completion
# suggestions will be returned.
# A value of 0 will disable completions that involve remote requests.
#tab_completion_timeout = 5
# 'parallel_process_count' and 'parallel_thread_count' specify the number
# of OS processes and Python threads, respectively, to use when executing
# operations in parallel. The default settings should work well as configured,
# however, to enhance performance for transfers involving large numbers of
# files, you may experiment with hand tuning these values to optimize
# performance for your particular system configuration.
#parallel_process_count = %(parallel_process_count)d
#parallel_thread_count = %(parallel_thread_count)d
# 'parallel_composite_upload_threshold' specifies the maximum size of a file to
# upload in a single stream. Files larger than this threshold will be
# partitioned into component parts and uploaded in parallel and then composed
# into a single object.
# The number of components will be the smaller of
# ceil(file_size / parallel_composite_upload_component_size) and
# MAX_COMPOSE_ARITY. The current value of MAX_COMPOSE_ARITY is
# %(max_compose_arity)d.
# If 'parallel_composite_upload_threshold' is set to 0, then automatic parallel
# uploads will never occur.
# Setting an extremely low threshold is unadvisable. The vast majority of
# environments will see degraded performance for thresholds below 80M, and it
# is almost never advantageous to have a threshold below 20M.
# 'parallel_composite_upload_component_size' specifies the ideal size of a
# component in bytes, which will act as an upper bound to the size of the
# components if ceil(file_size / parallel_composite_upload_component_size) is
# less than MAX_COMPOSE_ARITY.
# Values can be provided either in bytes or as human-readable values
# (e.g., "150M" to represent 150 mebibytes)
#
# Note: At present parallel composite uploads are disabled by default, because
# using composite objects requires a compiled crcmod (see "gsutil help crcmod"),
# and for operating systems that don't already have this package installed this
# makes gsutil harder to use. Google is actively working with a number of the
# Linux distributions to get crcmod included with the stock distribution. Once
# that is done we will re-enable parallel composite uploads by default in
# gsutil.
#
# Note: Parallel composite uploads should not be used with NEARLINE, COLDLINE,
# or ARCHIVE storage class buckets, as doing this incurs an early deletion
# charge for each component object.
#
# Note: Parallel composite uploads are not enabled with Cloud KMS encrypted
# objects as a source or destination, as composition with KMS objects is not yet
# supported.
#parallel_composite_upload_threshold = %(parallel_composite_upload_threshold)s
#parallel_composite_upload_component_size = %(parallel_composite_upload_component_size)s
#
# 'parallel_composite_upload_bypass_kms_check' removes the object/bucket KMS checks
# used to guard composition of KMS objects.
#disable_parallel_composite_upload_kms_check = False
# 'sliced_object_download_threshold' and
# 'sliced_object_download_component_size' have analogous functionality to
# their respective parallel_composite_upload config values.
# 'sliced_object_download_max_components' specifies the maximum number of
# slices to be used when performing a sliced object download.
#sliced_object_download_threshold = %(sliced_object_download_threshold)s
#sliced_object_download_component_size = %(sliced_object_download_component_size)s
#sliced_object_download_max_components = %(sliced_object_download_max_components)s
# Compressed transport encoded uploads buffer chunks of compressed data. When
# running a composite upload and/or many uploads in parallel, compression may
# consume more memory than available. This setting restricts the number of
# compressed transport encoded uploads running in parallel such that they
# don't consume more memory than set here. This is 2GiB by default.
# Values can be provided either in bytes or as human-readable values
# (e.g., "2G" to represent 2 gibibytes)
#max_upload_compression_buffer_size = %(max_upload_compression_buffer_size)s
# GZIP compression level, if using compression. Reducing this can have
# a dramatic impact on compression speed with minor size increases.
# This is a value from 0-9, with 9 being max compression.
# A good level to try is 6, which is the default used by the gzip tool.
#gzip_compression_level = %(gzip_compression_level)s
# 'task_estimation_threshold' controls how many files or objects gsutil
# processes before it attempts to estimate the total work that will be
# performed by the command. Estimation makes extra directory listing or API
# list calls and is performed only if multiple processes and/or threads are
# used. Estimation can slightly increase cost due to extra
# listing calls; to disable it entirely, set this value to 0.
#task_estimation_threshold=%(task_estimation_threshold)s
# 'use_magicfile' specifies if the 'file --mime <filename>' command should be
# used to guess content types instead of the default filename extension-based
# mechanism. Available on UNIX and macOS (and possibly on Windows, if you're
# running Cygwin or some other package that provides implementations of
# UNIX-like commands). When available and enabled use_magicfile should be more
# robust because it analyzes file contents in addition to extensions.
#use_magicfile = False
# Service account emails for testing the hmac command. If these fields are not
# populated with distinct service accounts the tests for the hmac command will
# not be run. Primarily useful for tool developers.
#test_hmac_service_account =
#test_hmac_alt_service_account =
#test_hmac_list_service_account =
# Service account emails for testing impersonation credentials. If this field is
# not populated with a service account the tests for service account
# impersonation will not run. Primarily useful for tool developers.
#test_impersonate_service_account =
# 'content_language' specifies the ISO 639-1 language code of the content, to be
# passed in the Content-Language header. By default no Content-Language is sent.
# See the ISO 639-1 column of
# http://www.loc.gov/standards/iso639-2/php/code_list.php for a list of
# language codes.
content_language = en
# 'check_hashes' specifies how strictly to require integrity checking for
# downloaded data. Legal values are:
# '%(hash_fast_else_fail)s' - (default) Only integrity check if the digest
# will run efficiently (using compiled code), else fail the download.
# '%(hash_fast_else_skip)s' - Only integrity check if the server supplies a
# hash and the local digest computation will run quickly, else skip the
# check.
# '%(hash_always)s' - Always check download integrity regardless of possible
# performance costs.
# '%(hash_never)s' - Don't perform download integrity checks. This setting is
# not recommended except for special cases such as measuring download
# performance excluding time for integrity checking.
# This option exists to assist users who wish to download a GCS composite object
# and are unable to install crcmod with the C-extension. CRC32c is the only
# available integrity check for composite objects, and without the C-extension,
# download performance can be significantly degraded by the digest computation.
# This option is ignored for daisy-chain copies, which don't compute hashes but
# instead (inexpensively) compare the cloud source and destination hashes.
#check_hashes = if_fast_else_fail
# 'encryption_key' specifies a single customer-supplied encryption key that
# will be used for all data written to Google Cloud Storage. See
# "gsutil help encryption" for more information
# Encryption key: RFC 4648 section 4 base64-encoded AES256 string
# Warning: If decrypt_key is specified without an encrypt_key, objects will be
# decrypted when copied in the cloud.
#encryption_key=
# Each 'decryption_key' entry specifies a customer-supplied decryption key that
# will be used to access and Google Cloud Storage objects encrypted with
# the corresponding key.
# Decryption keys: Up to 100 RFC 4648 section 4 base64-encoded AES256 strings
# in ascending numerical order, starting with 1.
#decryption_key1=
#decryption_key2=
#decryption_key3=
# The ability to specify an alternative JSON API version is primarily for cloud
# storage service developers.
#json_api_version = v1
# Specifies the API to use when interacting with cloud storage providers. If the
# gsutil command supports this API for the provider, it will be used instead of
# the default API. Commands typically default to XML for S3 and JSON for GCS.
# Note that if any encryption configuration options are set (see above), the
# JSON API will be used for interacting with Google Cloud Storage buckets even
# if XML is preferred, as gsutil does not currently support this functionality
# when using the XML API.
#prefer_api = json
#prefer_api = xml
# Disables the prompt asking for opt-in to data collection for analytics.
#disable_analytics_prompt = True
# The "test" command runs tests against regional buckets (unless you supply the
# `-b` option). By default, the region used is us-central1, but you can change
# the default region using this option.
#test_cmd_regional_bucket_location = us-central1
# Tests for the "notification watchbucket" command require a notification URL.
# If this option is not supplied, those tests will be skipped.
#test_notification_url = https://yourdomain.url/notification-endpoint
""" % {
'hash_fast_else_fail': CHECK_HASH_IF_FAST_ELSE_FAIL,
'hash_fast_else_skip': CHECK_HASH_IF_FAST_ELSE_SKIP,
'hash_always': CHECK_HASH_ALWAYS,
'hash_never': CHECK_HASH_NEVER,
'resumable_threshold': constants.RESUMABLE_THRESHOLD_B,
'parallel_process_count': DEFAULT_PARALLEL_PROCESS_COUNT,
'parallel_thread_count': DEFAULT_PARALLEL_THREAD_COUNT,
'parallel_composite_upload_threshold':
(DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD),
'parallel_composite_upload_component_size':
(DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE),
'sliced_object_download_threshold':
(DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD),
'sliced_object_download_component_size':
(DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE),
'sliced_object_download_max_components':
(DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS),
'max_compose_arity': MAX_COMPOSE_ARITY,
'task_estimation_threshold': DEFAULT_TASK_ESTIMATION_THRESHOLD,
'max_upload_compression_buffer_size':
(DEFAULT_MAX_UPLOAD_COMPRESSION_BUFFER_SIZE),
'gzip_compression_level': DEFAULT_GZIP_COMPRESSION_LEVEL,
}
# Template for the [OAuth2] section of the generated config file. Written
# verbatim; all option lines are commented out so plugin defaults apply.
# Fix: corrected the "OAUth2" typo in the emitted provider-options comment.
CONFIG_OAUTH2_CONFIG_CONTENT = """
[OAuth2]
# This section specifies options used with OAuth2 authentication.
# 'token_cache' specifies how the OAuth2 client should cache access tokens.
# Valid values are:
# 'in_memory': an in-memory cache is used. This is only useful if the boto
# client instance (and with it the OAuth2 plugin instance) persists
# across multiple requests.
# 'file_system' : access tokens will be cached in the file system, in files
# whose names include a key derived from the refresh token the access token
# based on.
# The default is 'file_system'.
#token_cache = file_system
#token_cache = in_memory
# 'token_cache_path_pattern' specifies a path pattern for token cache files.
# This option is only relevant if token_cache = file_system.
# The value of this option should be a path, with place-holders '%(key)s' (which
# will be replaced with a key derived from the refresh token the cached access
# token was based on), and (optionally), %(uid)s (which will be replaced with
# the UID of the current user, if available via os.getuid()).
# Note that the config parser itself interpolates '%' placeholders, and hence
# the above placeholders need to be escaped as '%%(key)s'.
# The default value of this option is
# token_cache_path_pattern = <tmpdir>/oauth2client-tokencache.%%(uid)s.%%(key)s
# where <tmpdir> is the system-dependent default temp directory.
# The following options specify the label and endpoint URIs for the OAuth2
# authorization provider being used. Primarily useful for tool developers.
#provider_label = Google
#provider_authorization_uri = https://accounts.google.com/o/oauth2/auth
#provider_token_uri = https://oauth2.googleapis.com/token
# 'oauth2_refresh_retries' controls the number of retry attempts made when
# rate limiting errors occur for OAuth2 requests to retrieve an access token.
# The default value is 6.
#oauth2_refresh_retries = <integer value>
# The following options specify the OAuth2 client identity and secret that is
# used when requesting and using OAuth2 tokens. If not specified, a default
# OAuth2 client for the gsutil tool is used; for uses of the boto library (with
# OAuth2 authentication plugin) in other client software, it is recommended to
# use a tool/client-specific OAuth2 client. For more information on OAuth2, see
# http://code.google.com/apis/accounts/docs/OAuth2.html
"""
class ConfigCommand(Command):
  """Implementation of gsutil config command."""

  # Command specification. See base class for documentation.
  # 'supported_sub_args' mirrors the flags described under <B>OPTIONS</B> in
  # the detailed help text (-a/-b/-e/-f/-h/-n/-o/-r/-s/-w); '--reauth' is the
  # single long-form flag.
  command_spec = Command.CreateCommandSpec(
      'config',
      command_name_aliases=['cfg', 'conf', 'configure'],
      usage_synopsis=_SYNOPSIS,
      min_args=0,
      max_args=0,
      supported_sub_args='abefhno:rs:w',
      supported_private_args=['reauth'],
      file_url_ok=False,
      provider_url_ok=False,
      urls_start_arg=0,
  )
  # Help specification. See help_provider.py for documentation.
  # Note the extra help aliases 'aws' and 's3': this command also configures
  # S3-style (HMAC) credentials, so those lookups land here.
  help_spec = Command.HelpSpec(
      help_name='config',
      help_name_aliases=['cfg', 'conf', 'configure', 'aws', 's3'],
      help_type='command_help',
      help_one_line_summary=(
          'Obtain credentials and create configuration file'),
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={},
  )
def _OpenConfigFile(self, file_path):
"""Creates and opens a configuration file for writing.
The file is created with mode 0600, and attempts to open existing files will
fail (the latter is important to prevent symlink attacks).
It is the caller's responsibility to close the file.
Args:
file_path: Path of the file to be created.
Returns:
A writable file object for the opened file.
Raises:
CommandException: if an error occurred when opening the file (including
when the file already exists).
"""
flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
# Accommodate Windows; copied from python2.6/tempfile.py.
if hasattr(os, 'O_NOINHERIT'):
flags |= os.O_NOINHERIT
try:
fd = os.open(file_path, flags, 0o600)
except (OSError, IOError) as e:
raise CommandException('Failed to open %s for writing: %s' %
(file_path, e))
return os.fdopen(fd, 'w')
def _CheckPrivateKeyFilePermissions(self, file_path):
"""Checks that the file has reasonable permissions for a private key.
In particular, check that the filename provided by the user is not
world- or group-readable. If either of these are true, we issue a warning
and offer to fix the permissions.
Args:
file_path: The name of the private key file.
"""
if system_util.IS_WINDOWS:
# For Windows, this check doesn't work (it actually just checks whether
# the file is read-only). Since Windows files have a complicated ACL
# system, this check doesn't make much sense on Windows anyway, so we
# just don't do it.
return
st = os.stat(file_path)
if bool((stat.S_IRGRP | stat.S_IROTH) & st.st_mode):
self.logger.warn(
'\nYour private key file is readable by people other than yourself.\n'
'This is a security risk, since anyone with this information can use '
'your service account.\n')
fix_it = input('Would you like gsutil to change the file '
'permissions for you? (y/N) ')
if fix_it in ('y', 'Y'):
try:
os.chmod(file_path, 0o400)
self.logger.info(
'\nThe permissions on your file have been successfully '
'modified.'
'\nThe only access allowed is readability by the user '
'(permissions 0400 in chmod).')
except Exception as _: # pylint: disable=broad-except
self.logger.warn(
'\nWe were unable to modify the permissions on your file.\n'
'If you would like to fix this yourself, consider running:\n'
'"sudo chmod 400 </path/to/key>" for improved security.')
else:
self.logger.info(
'\nYou have chosen to allow this file to be readable by others.\n'
'If you would like to fix this yourself, consider running:\n'
'"sudo chmod 400 </path/to/key>" for improved security.')
def _PromptForProxyConfigVarAndMaybeSaveToBotoConfig(self,
varname,
prompt,
convert_to_bool=False):
"""Prompts for one proxy config line, saves to boto.config if not empty.
Args:
varname: The config variable name.
prompt: The prompt to output to the user.
convert_to_bool: Whether to convert "y/n" to True/False.
"""
value = input(prompt)
if value:
if convert_to_bool:
if value == 'y' or value == 'Y':
value = 'True'
else:
value = 'False'
boto.config.set('Boto', varname, value)
def _PromptForProxyConfig(self):
"""Prompts for proxy config data, loads non-empty values into boto.config.
"""
self._PromptForProxyConfigVarAndMaybeSaveToBotoConfig(
'proxy', 'What is your proxy host? ')
self._PromptForProxyConfigVarAndMaybeSaveToBotoConfig(
'proxy_type', 'What is your proxy type (socks4, socks5, http)? ')
self._PromptForProxyConfigVarAndMaybeSaveToBotoConfig(
'proxy_port', 'What is your proxy port? ')
self._PromptForProxyConfigVarAndMaybeSaveToBotoConfig(
'proxy_user', 'What is your proxy user (leave blank if not used)? ')
self._PromptForProxyConfigVarAndMaybeSaveToBotoConfig(
'proxy_pass', 'What is your proxy pass (leave blank if not used)? ')
self._PromptForProxyConfigVarAndMaybeSaveToBotoConfig(
'proxy_rdns',
'Should DNS lookups be resolved by your proxy? (Y if your site '
'disallows client DNS lookups; NOT supported for socks)? ',
convert_to_bool=True)
def _WriteConfigLineMaybeCommented(self, config_file, name, value, desc):
"""Writes proxy name/value pair or comment line to config file.
Writes proxy name/value pair if value is not None. Otherwise writes
comment line.
Args:
config_file: File object to which the resulting config file will be
written.
name: The config variable name.
value: The value, or None.
desc: Human readable description (for comment).
"""
if not value:
name = '#%s' % name
value = '<%s>' % desc
config_file.write('%s = %s\n' % (name, value))
def _WriteProxyConfigFileSection(self, config_file):
"""Writes proxy section of configuration file.
Args:
config_file: File object to which the resulting config file will be
written.
"""
config = boto.config
config_file.write(
'# To use a proxy, edit and uncomment the proxy and proxy_port lines.\n'
'# If you need a user/password with this proxy, edit and uncomment\n'
'# those lines as well. If your organization also disallows DNS\n'
'# lookups by client machines, set proxy_rdns to True (the default).\n'
'# If you have installed gsutil through the Cloud SDK and have \n'
'# configured proxy settings in gcloud, those proxy settings will \n'
'# override any other options (including those set here, along with \n'
'# any settings in proxy-related environment variables). Otherwise, \n'
'# if proxy_host and proxy_port are not specified in this file and\n'
'# one of the OS environment variables http_proxy, https_proxy, or\n'
'# HTTPS_PROXY is defined, gsutil will use the proxy server specified\n'
'# in these environment variables, in order of precedence according\n'
'# to how they are listed above.\n')
self._WriteConfigLineMaybeCommented(config_file, 'proxy',
config.get_value('Boto', 'proxy', None),
'proxy host')
self._WriteConfigLineMaybeCommented(
config_file, 'proxy_type', config.get_value('Boto', 'proxy_type', None),
'proxy type (socks4, socks5, http) | Defaults to http')
self._WriteConfigLineMaybeCommented(
config_file, 'proxy_port', config.get_value('Boto', 'proxy_port', None),
'proxy port')
self._WriteConfigLineMaybeCommented(
config_file, 'proxy_user', config.get_value('Boto', 'proxy_user', None),
'proxy user')
self._WriteConfigLineMaybeCommented(
config_file, 'proxy_pass', config.get_value('Boto', 'proxy_pass', None),
'proxy password')
self._WriteConfigLineMaybeCommented(
config_file, 'proxy_rdns', config.get_value('Boto', 'proxy_rdns',
False),
'let proxy server perform DNS lookups (True,False); socks proxy not supported'
)
# pylint: disable=dangerous-default-value,too-many-statements
def _WriteBotoConfigFile(self,
                         config_file,
                         launch_browser=True,
                         oauth2_scopes=[constants.Scopes.CLOUD_PLATFORM],
                         cred_type=CredTypes.OAUTH2_USER_ACCOUNT,
                         configure_auth=True):
    """Creates a boto config file interactively.

    Needed credentials are obtained interactively, either by asking the user
    for access key and secret, or by walking the user through the OAuth2
    approval flow.

    Args:
      config_file: File object to which the resulting config file will be
          written.
      launch_browser: In the OAuth2 approval flow, attempt to open a browser
          window and navigate to the approval URL.
      oauth2_scopes: A list of OAuth2 scopes to request authorization for,
          when using OAuth2. NOTE: the mutable list default is deliberate
          (see the pylint disable above); RunCommand always passes an
          explicit scopes list.
      cred_type: There are three options:
          - for HMAC, ask the user for access key and secret
          - for OAUTH2_USER_ACCOUNT, walk the user through OAuth2 approval
            flow and produce a config with an oauth2_refresh_token credential.
          - for OAUTH2_SERVICE_ACCOUNT, prompt the user for OAuth2 for
            service account email address and private key file (and if the
            file is a .p12 file, the password for that file).
      configure_auth: Boolean, whether or not to configure authentication in
          the generated file.

    Raises:
      CommandException: if credentials are missing/incomplete, or a .p12 key
          is supplied without an SSL crypto library available.
    """
    # ------------------------------------------------------------------
    # Phase 1: interactively collect credentials (before writing anything).
    # ------------------------------------------------------------------
    provider_map = {'aws': 'aws', 'google': 'gs'}
    uri_map = {'aws': 's3', 'google': 'gs'}
    key_ids = {}
    sec_keys = {}
    service_account_key_is_json = False
    if configure_auth:
      if cred_type == CredTypes.OAUTH2_SERVICE_ACCOUNT:
        gs_service_key_file = input('What is the full path to your private '
                                    'key file? ')
        # JSON files have the email address built-in and don't require a
        # password.
        try:
          with open(gs_service_key_file, 'rb') as key_file_fp:
            json.loads(key_file_fp.read())
          service_account_key_is_json = True
        except ValueError:
          # Not JSON, so it must be a .p12 key, which needs a crypto library.
          if not HAS_CRYPTO:
            raise CommandException(
                'Service account authentication via a .p12 file requires '
                'either\nPyOpenSSL or PyCrypto 2.6 or later. Please install '
                'either of these\nto proceed, use a JSON-format key file, or '
                'configure a different type of credentials.')
        if not service_account_key_is_json:
          # .p12 keys don't embed the client email, so ask for it, plus the
          # (optional) key file password.
          gs_service_client_id = input('What is your service account email '
                                       'address? ')
          gs_service_key_file_password = input('\n'.join(
              textwrap.wrap(
                  'What is the password for your service key file [if you '
                  'haven\'t set one explicitly, leave this line blank]?')) +
                                               ' ')
        self._CheckPrivateKeyFilePermissions(gs_service_key_file)
      elif cred_type == CredTypes.OAUTH2_USER_ACCOUNT:
        oauth2_client = oauth2_helper.OAuth2ClientFromBotoConfig(
            boto.config, cred_type)
        try:
          oauth2_refresh_token = oauth2_helper.OAuth2ApprovalFlow(
              oauth2_client, oauth2_scopes, launch_browser)
        except (ResponseNotReady, ServerNotFoundError, socket.error):
          # TODO: Determine condition to check for in the ResponseNotReady
          # exception so we only run proxy config flow if failure was caused
          # by request being blocked because it wasn't sent through proxy.
          # (This error could also happen if gsutil or the oauth2 client had
          # a bug that attempted to incorrectly reuse an HTTP connection,
          # for example.)
          sys.stdout.write('\n'.join(
              textwrap.wrap(
                  "Unable to connect to accounts.google.com during OAuth2 flow. "
                  "This can happen if your site uses a proxy. If you are using "
                  "gsutil through a proxy, please enter the proxy's information; "
                  "otherwise leave the following fields blank.")) + '\n')
          self._PromptForProxyConfig()
          # Retry the approval flow now that a proxy may be configured.
          oauth2_client = oauth2_helper.OAuth2ClientFromBotoConfig(
              boto.config, cred_type)
          oauth2_refresh_token = oauth2_helper.OAuth2ApprovalFlow(
              oauth2_client, oauth2_scopes, launch_browser)
      elif cred_type == CredTypes.HMAC:
        got_creds = False
        for provider in provider_map:
          # Only Google HMAC keys are prompted for here; AWS keys can be
          # added to the generated file by hand.
          if provider == 'google':
            key_ids[provider] = input('What is your %s access key ID? ' %
                                      provider)
            sec_keys[provider] = input('What is your %s secret access '
                                       'key? ' % provider)
            got_creds = True
            if not key_ids[provider] or not sec_keys[provider]:
              raise CommandException(
                  'Incomplete credentials provided. Please try again.')
        if not got_creds:
          raise CommandException('No credentials provided. Please try again.')

    # ------------------------------------------------------------------
    # Phase 2: write out the config file.
    # ------------------------------------------------------------------
    # Write the config file prelude.
    config_file.write(CONFIG_PRELUDE_CONTENT.lstrip())
    config_file.write(
        '# This file was created by gsutil version %s at %s.\n' %
        (gslib.VERSION, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    config_file.write(
        '#\n# You can create additional configuration files by '
        'running\n# gsutil config [options] [-o <config-file>]\n\n\n')

    # Write the config file Credentials section.
    config_file.write('[Credentials]\n\n')
    if configure_auth:
      if cred_type == CredTypes.OAUTH2_SERVICE_ACCOUNT:
        config_file.write('# Google OAuth2 service account credentials '
                          '(for "gs://" URIs):\n')
        config_file.write('gs_service_key_file = %s\n' % gs_service_key_file)
        if not service_account_key_is_json:
          config_file.write('gs_service_client_id = %s\n' %
                            gs_service_client_id)
          if not gs_service_key_file_password:
            config_file.write(
                '# If you would like to set your password, you can do so\n'
                '# using the following commands (replaced with your\n'
                '# information):\n'
                '# "openssl pkcs12 -in cert1.p12 -out temp_cert.pem"\n'
                '# "openssl pkcs12 -export -in temp_cert.pem -out cert2.p12"\n'
                '# "rm -f temp_cert.pem"\n'
                '# Your initial password is "notasecret" - for more\n'
                '# information, please see \n'
                '# http://www.openssl.org/docs/apps/pkcs12.html.\n')
            config_file.write('#gs_service_key_file_password =\n\n')
          else:
            config_file.write('gs_service_key_file_password = %s\n\n' %
                              gs_service_key_file_password)
      elif cred_type == CredTypes.OAUTH2_USER_ACCOUNT:
        config_file.write(
            '# Google OAuth2 credentials (for "gs://" URIs):\n'
            '# The following OAuth2 account is authorized for scope(s):\n')
        for scope in oauth2_scopes:
          config_file.write('# %s\n' % scope)
        config_file.write('gs_oauth2_refresh_token = %s\n\n' %
                          oauth2_refresh_token)
      else:
        config_file.write(
            '# To add Google OAuth2 credentials ("gs://" URIs), '
            'edit and uncomment the\n# following line:\n'
            '#gs_oauth2_refresh_token = <your OAuth2 refresh token>\n\n')
    else:
      if system_util.InvokedViaCloudSdk():
        config_file.write(
            '# Google OAuth2 credentials are managed by the Cloud SDK and\n'
            '# do not need to be present in this file.\n')

    # Per-provider HMAC credential lines and host overrides.
    for provider in provider_map:
      key_prefix = provider_map[provider]
      uri_scheme = uri_map[provider]
      if provider in key_ids and provider in sec_keys:
        config_file.write('# %s credentials ("%s://" URIs):\n' %
                          (provider, uri_scheme))
        config_file.write('%s_access_key_id = %s\n' %
                          (key_prefix, key_ids[provider]))
        config_file.write('%s_secret_access_key = %s\n' %
                          (key_prefix, sec_keys[provider]))
      else:
        config_file.write(
            '# To add HMAC %s credentials for "%s://" URIs, edit and '
            'uncomment the\n# following two lines:\n'
            '#%s_access_key_id = <your %s access key ID>\n'
            '#%s_secret_access_key = <your %s secret access key>\n' %
            (provider, uri_scheme, key_prefix, provider, key_prefix, provider))
      host_key = Provider.HostKeyMap[provider]
      config_file.write(
          '# The ability to specify an alternate storage host and port\n'
          '# is primarily for cloud storage service developers.\n'
          '# Setting a non-default gs_host only works if prefer_api=xml.\n'
          '#%s_host = <alternate storage host address>\n'
          '#%s_port = <alternate storage host port>\n'
          '# In some cases, (e.g. VPC requests) the "host" HTTP header should\n'
          '# be different than the host used in the request URL.\n'
          '#%s_host_header = <alternate storage host header>\n' %
          (host_key, host_key, host_key))
      if host_key == 'gs':
        config_file.write(
            '#%s_json_host = <alternate JSON API storage host address>\n'
            '#%s_json_port = <alternate JSON API storage host port>\n'
            '#%s_json_host_header = <alternate JSON API storage host header>\n\n'
            % (host_key, host_key, host_key))
        # BUGFIX: this write previously passed no arguments to the %s
        # placeholders, so the literal text '%s' was written into the
        # generated config file instead of the 'gs' prefix.
        config_file.write(
            '# To impersonate a service account for "%s://" URIs over\n'
            '# JSON API, edit and uncomment the following line:\n'
            '#%s_impersonate_service_account = <service account email>\n\n' %
            (uri_scheme, host_key))

    # Add device certificate mTLS fields.
    config_file.write(
        textwrap.dedent("""\
            # This configuration setting enables or disables mutual TLS
            # authentication. The default value for this setting is "false". When
            # set to "true", gsutil uses the configured client certificate as
            # transport credential to access the APIs. The use of mTLS ensures that
            # the access originates from a trusted enterprise device. When enabled,
            # the client certificate is auto discovered using the endpoint
            # verification agent. When set to "true" but no client certificate or
            # key is found, users receive an error.
            #use_client_certificate = False
            # The command line to execute, which prints the
            # certificate, private key, or password to use in
            # conjunction with "use_client_certificate = True".
            #cert_provider_command = <Absolute path to command to run for
            # certification. Ex: "/scripts/gen_cert.sh">
            """))

    # Write the config file Boto section.
    config_file.write('%s\n' % CONFIG_BOTO_SECTION_CONTENT)
    self._WriteProxyConfigFileSection(config_file)

    # Write the Google Compute Engine section.
    config_file.write(CONFIG_GOOGLECOMPUTE_SECTION_CONTENT)

    # Write the config file GSUtil section that doesn't depend on user input.
    config_file.write(CONFIG_INPUTLESS_GSUTIL_SECTION_CONTENT)

    # Write the default API version: XML API v2 for OAuth2 configs, v1 when
    # only HMAC credentials were configured.
    config_file.write("""
# 'default_api_version' specifies the default Google Cloud Storage XML API
# version to use. If not set below gsutil defaults to API version 1.
""")
    api_version = 2
    if cred_type == CredTypes.HMAC:
      api_version = 1

    config_file.write('default_api_version = %d\n' % api_version)

    # Write the config file GSUtil section that includes the default
    # project ID input from the user.
    if not system_util.InvokedViaCloudSdk():
      if launch_browser:
        sys.stdout.write(
            'Attempting to launch a browser to open the Google Cloud Console '
            'at URL: %s\n\n'
            '[Note: due to a Python bug, you may see a spurious error message '
            '"object is not\ncallable [...] in [...] Popen.__del__" which can '
            'be ignored.]\n\n' % GOOG_CLOUD_CONSOLE_URI)
        sys.stdout.write(
            'In your browser you should see the Cloud Console. Find the '
            'project you will\nuse, and then copy the Project ID string from '
            'the second '
            'column. Older projects do\nnot have Project ID strings. For such '
            'projects, click the project and then copy the\nProject Number '
            'listed under that project.\n\n')
        if not webbrowser.open(GOOG_CLOUD_CONSOLE_URI, new=1, autoraise=True):
          sys.stdout.write(
              'Launching browser appears to have failed; please navigate a '
              'browser to the following URL:\n%s\n' % GOOG_CLOUD_CONSOLE_URI)
        # Short delay; webbrowser.open on linux insists on printing out a
        # message which we don't want to run into the prompt for the auth
        # code.
        time.sleep(2)
      else:
        sys.stdout.write(
            '\nPlease navigate your browser to %s,\nthen find the project '
            'you will use, and copy the Project ID string from the\nsecond '
            'column. Older projects do not have Project ID strings. For such '
            'projects,\n click the project and then copy the Project Number '
            'listed under that project.\n\n' % GOOG_CLOUD_CONSOLE_URI)
      default_project_id = input('What is your project-id? ').strip()
      project_id_section_prelude = """
# 'default_project_id' specifies the default Google Cloud Storage project ID to
# use with the 'mb' and 'ls' commands. This default can be overridden by
# specifying the -p option to the 'mb' and 'ls' commands.
"""
      if not default_project_id:
        raise CommandException(
            'No default project ID entered. The default project ID is needed '
            'by the\nls and mb commands; please try again.')
      config_file.write('%sdefault_project_id = %s\n\n\n' %
                        (project_id_section_prelude, default_project_id))
      CheckAndMaybePromptForAnalyticsEnabling()

    # Write the config file OAuth2 section that doesn't depend on user input.
    config_file.write(CONFIG_OAUTH2_CONFIG_CONTENT)

    # If the user ran gsutil config with a custom client ID, write that to the
    # config file.
    if (cred_type == CredTypes.OAUTH2_USER_ACCOUNT and configure_auth and
        oauth2_client.client_id != oauth2_helper.CLIENT_ID and
        oauth2_client.client_secret != oauth2_helper.CLIENT_SECRET):
      config_file.write('client_id = %s\nclient_secret = %s\n' %
                        (oauth2_client.client_id, oauth2_client.client_secret))
    else:
      config_file.write('#client_id = <OAuth2 client id>\n'
                        '#client_secret = <OAuth2 client secret>\n')
def RunCommand(self):
    """Command entry point for the config command.

    Parses the config sub-options, decides where the generated boto config
    should be written (backing up any existing default config), then
    delegates the interactive file creation to _WriteBotoConfigFile.

    Returns:
      0 on success.

    Raises:
      CommandException: on invalid flag combinations, a missing project ID,
          or a failure to back up an existing config file.
    """
    # Option parsing state; defaults mean "OAuth2 user account, no browser".
    scopes = []
    cred_type = CredTypes.OAUTH2_USER_ACCOUNT
    launch_browser = False
    output_file_name = None
    has_a = False
    has_e = False
    configure_auth = True
    for opt, opt_arg in self.sub_opts:
      if opt == '-a':
        cred_type = CredTypes.HMAC
        has_a = True
      elif opt == '-b':
        launch_browser = True
      elif opt == '-e':
        cred_type = CredTypes.OAUTH2_SERVICE_ACCOUNT
        has_e = True
      elif opt == '-f':
        scopes.append(constants.Scopes.FULL_CONTROL)
      elif opt == '-n':
        configure_auth = False
      elif opt == '-o':
        output_file_name = opt_arg
      elif opt == '-r':
        scopes.append(constants.Scopes.READ_ONLY)
      elif opt == '--reauth':
        scopes.append(constants.Scopes.REAUTH)
      elif opt == '-s':
        scopes.append(opt_arg)
      elif opt == '-w':
        scopes.append(constants.Scopes.READ_WRITE)
      else:
        self.RaiseInvalidArgumentException()

    # -a (HMAC) and -e (service account) are mutually exclusive.
    if has_e and has_a:
      raise CommandException('Both -a and -e cannot be specified. Please see '
                             '"gsutil help config" for more information.')

    # -n (no auth) conflicts with every auth-related flag.
    if not configure_auth and (has_a or has_e or scopes or launch_browser):
      raise CommandException(
          'The -a, -b, -e, -f, -r, --reauth, -s, and -w flags cannot be '
          'specified with the -n flag. Please see "gsutil help config" for '
          'more information.')

    # Don't allow users to configure Oauth2 (any option other than -a and -n)
    # when running in the Cloud SDK, unless they have the Cloud SDK configured
    # not to pass credentials to gsutil.
    if (system_util.InvokedViaCloudSdk() and
        system_util.CloudSdkCredPassingEnabled() and not has_a and
        configure_auth):
      raise CommandException('\n'.join([
          'OAuth2 is the preferred authentication mechanism with the Cloud '
          'SDK.',
          'Run "gcloud auth login" to configure authentication, unless:',
          '\n'.join(
              textwrap.wrap(
                  'You don\'t want gsutil to use OAuth2 credentials from the Cloud '
                  'SDK, but instead want to manage credentials with .boto files '
                  'generated by running "gsutil config"; in which case run '
                  '"gcloud config set pass_credentials_to_gsutil false".',
                  initial_indent='- ',
                  subsequent_indent='  ')),
          '\n'.join(
              textwrap.wrap(
                  'You want to authenticate with an HMAC access key and secret, in '
                  'which case run "gsutil config -a".',
                  initial_indent='- ',
                  subsequent_indent='  ')),
      ]))

    # HMAC in the Cloud SDK is allowed but will be shadowed by OAuth2 unless
    # credential passing is disabled; warn about that.
    if system_util.InvokedViaCloudSdk() and has_a:
      sys.stderr.write('\n'.join(
          textwrap.wrap(
              'This command will configure HMAC credentials, but gsutil will use '
              'OAuth2 credentials from the Cloud SDK by default. To make sure '
              'the HMAC credentials are used, run: "gcloud config set '
              'pass_credentials_to_gsutil false".')) + '\n\n')

    # No explicit scope flags: request the broad defaults.
    if not scopes:
      scopes.append(constants.Scopes.CLOUD_PLATFORM)
      scopes.append(constants.Scopes.REAUTH)

    default_config_path_bak = None
    if not output_file_name:
      # Check to see if a default config file name is requested via
      # environment variable. If so, use it, otherwise use the hard-coded
      # default file. Then use the default config file name, if it doesn't
      # exist or can be moved out of the way without clobbering an existing
      # backup file.
      boto_config_from_env = os.environ.get('BOTO_CONFIG', None)
      if boto_config_from_env:
        default_config_path = boto_config_from_env
      else:
        default_config_path = os.path.expanduser(os.path.join('~', '.boto'))
      if not os.path.exists(default_config_path):
        output_file_name = default_config_path
      else:
        # An existing config is moved aside to <path>.bak so it can be
        # restored if config creation fails or is interrupted.
        default_config_path_bak = default_config_path + '.bak'
        if os.path.exists(default_config_path_bak):
          raise CommandException('Cannot back up existing config '
                                 'file "%s": backup file exists ("%s").' %
                                 (default_config_path, default_config_path_bak))
        else:
          try:
            sys.stderr.write(
                'Backing up existing config file "%s" to "%s"...\n' %
                (default_config_path, default_config_path_bak))
            os.rename(default_config_path, default_config_path_bak)
          except Exception as e:
            raise CommandException(
                'Failed to back up existing config '
                'file ("%s" -> "%s"): %s.' %
                (default_config_path, default_config_path_bak, e))
          output_file_name = default_config_path

    # '-o -' means "write the config to stdout".
    if output_file_name == '-':
      output_file = sys.stdout
    else:
      output_file = self._OpenConfigFile(output_file_name)
      sys.stderr.write('\n'.join(
          textwrap.wrap(
              'This command will create a boto config file at %s containing your '
              'credentials, based on your responses to the following questions.'
              % output_file_name)) + '\n')

    # Catch ^C so we can restore the backup.
    RegisterSignalHandler(signal.SIGINT, _CleanupHandler)
    try:
      self._WriteBotoConfigFile(output_file,
                                launch_browser=launch_browser,
                                oauth2_scopes=scopes,
                                cred_type=cred_type,
                                configure_auth=configure_auth)
    except Exception as e:
      user_aborted = isinstance(e, AbortException)
      if user_aborted:
        sys.stderr.write('\nCaught ^C; cleaning up\n')
      # If an error occurred during config file creation, remove the invalid
      # config file and restore the backup file.
      if output_file_name != '-':
        output_file.close()
        os.unlink(output_file_name)
        try:
          if default_config_path_bak:
            sys.stderr.write('Restoring previous backed up file (%s)\n' %
                             default_config_path_bak)
            os.rename(default_config_path_bak, output_file_name)
        except Exception as e:
          # Raise the original exception so that we can see what actually went
          # wrong, rather than just finding out that we died before assigning
          # a value to default_config_path_bak.
          # NOTE(review): 'except ... as e' rebinds e, so 'raise e' actually
          # re-raises the rename failure, not the original error; the bare
          # 'raise' below is what surfaces the original. Consider renaming
          # this inner exception variable.
          raise e
      raise

    if output_file_name != '-':
      output_file.close()
      # Point users with no proxy configured at the proxy instructions that
      # _WriteProxyConfigFileSection wrote into the file.
      if not boto.config.has_option('Boto', 'proxy'):
        sys.stderr.write('\n' + '\n'.join(
            textwrap.wrap(
                'Boto config file "%s" created.\nIf you need to use a proxy to '
                'access the Internet please see the instructions in that file.'
                % output_file_name)) + '\n')
    return 0
def _CleanupHandler(unused_signalnum, unused_handler):
  """SIGINT handler: abort the config command so cleanup can run.

  Raising AbortException unwinds into RunCommand's except block, which
  removes the partial config file and restores any backup.
  """
  raise AbortException('User interrupted config command')
| {
"content_hash": "210443f5236fb1d1af2d57ac7ca41dd9",
"timestamp": "",
"source": "github",
"line_count": 1256,
"max_line_length": 105,
"avg_line_length": 47.44904458598726,
"alnum_prop": 0.6822605543996242,
"repo_name": "catapult-project/catapult",
"id": "e5a3d7e2fb48dabddd64d49e5622a0c6550fc9f0",
"size": "60216",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "third_party/gsutil/gslib/commands/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
"""Useful wrappers and other tools."""
import re
from collections import namedtuple
from streamlink.utils import parse_json, parse_qsd as parse_query, parse_xml
__all__ = ["parse_json", "parse_xml", "parse_query"]
# Lookahead-based tag matcher: captures the tag name, its raw attribute
# string, an optional self-closing slash, and (when present) the inner text
# up to the matching close tag. The zero-width lookahead lets overlapping
# and nested tags all be found.
tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\s*(?P=tag)\s*>)?)''',
                    re.MULTILINE | re.DOTALL)
# Single attribute within a tag: key, optional quoted (or bare) value.
attr_re = re.compile(r'''\s*(?P<key>[\w-]+)\s*(?:=\s*(?P<quote>["']?)(?P<value>.*?)(?P=quote)\s*)?''')
# Result record: tag name, dict of lower-cased attribute names, inner text.
Tag = namedtuple("Tag", "tag attributes text")


def itertags(html, tag):
    """
    Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when
    standards compliance is not required. Will find tags that are commented out, or inside script tag etc.

    :param html: HTML page
    :param tag: tag name to find
    :return: generator with Tags
    """
    for candidate in tag_re.finditer(html):
        if candidate.group("tag") != tag:
            continue
        attributes = {
            found.group("key").lower(): found.group("value")
            for found in attr_re.finditer(candidate.group("attr"))
        }
        yield Tag(candidate.group("tag"), attributes, candidate.group("inner"))
| {
"content_hash": "699bcc2c0f2c6bee9823f24965bb3ce0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 115,
"avg_line_length": 40.67857142857143,
"alnum_prop": 0.607550482879719,
"repo_name": "beardypig/streamlink",
"id": "d64f11ba04427e1d8d37b4a6cbfebedccd46a415",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugin/api/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TicklabeloverflowValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the colorbar 'ticklabeloverflow' property."""

    def __init__(
        self,
        plotly_name="ticklabeloverflow",
        parent_name="scattercarpet.marker.colorbar",
        **kwargs,
    ):
        # Callers may override these; otherwise fall back to the defaults.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", ["allow", "hide past div", "hide past domain"])
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
| {
"content_hash": "e9ddf16797f4d1f00bef01f059459ec0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.6092150170648464,
"repo_name": "plotly/plotly.py",
"id": "44f5514fd43d9f326da14264b84e799691d0f309",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_ticklabeloverflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
Uber OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/uber.html
"""
from .oauth import BaseOAuth2
class UberOAuth2(BaseOAuth2):
    """Uber OAuth2 authentication backend."""

    name = 'uber'
    ID_KEY = 'uuid'
    SCOPE_SEPARATOR = ' '
    AUTHORIZATION_URL = 'https://login.uber.com/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://login.uber.com/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'

    def auth_complete_credentials(self):
        # Uber completes auth with the app's key/secret pair.
        return self.get_key_and_secret()

    def get_user_details(self, response):
        """Return user details from Uber account"""
        email = response.get('email', '')
        first = response.get('first_name', '')
        last = response.get('last_name', '')
        fullname, first_name, last_name = self.get_user_names('', first, last)
        return {
            'username': email,
            'email': email,
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        response = kwargs.pop('response')
        auth_header = '{0} {1}'.format(response.get('token_type'), access_token)
        return self.get_json('https://api.uber.com/v1/me',
                             headers={'Authorization': auth_header})
| {
"content_hash": "fd30656010d20814ae1b6e20fd1fcc27",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 34.17948717948718,
"alnum_prop": 0.5603900975243811,
"repo_name": "IKholopov/HackUPC2017",
"id": "e95a0a05cabf31d92993902e1ee12574092e67c7",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackupc/env/lib/python3.5/site-packages/social_core/backends/uber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63043"
},
{
"name": "HTML",
"bytes": "40996"
},
{
"name": "JavaScript",
"bytes": "272171"
},
{
"name": "Python",
"bytes": "40280"
}
],
"symlink_target": ""
} |
import hf, logging
from sqlalchemy import *
from datetime import datetime,timedelta
from time import mktime
import modules.feedparser
class RSSFeed(hf.module.ModuleBase):
    """HappyFace module that displays entries from an RSS/Atom feed.

    The feed is fetched via the HappyFace download service and parsed with
    the bundled feedparser; one 'feeds' subtable row is stored per entry.
    """

    # User-visible configuration options (commented-out keys are planned but
    # not implemented yet — see the TODOs in extractData).
    config_keys = {
        'source': ('URL of the RSS feed', '')
        # 'entries': ('Number of entries', '-1'),
        # 'days': ('Show only entries from the last n days', '7'),
        # 'hide_feed_title': ('Hide feed title', '0')
    }
    config_hint = ''

    # Main table: only the feed title is stored per acquisition.
    table_columns = [
        Column('title', TEXT),
    ], []

    # Subtable: one row per feed entry.
    subtable_columns = {'feeds': ([
        Column('author', TEXT),
        Column('title', TEXT),
        Column('link', TEXT),
        Column('published', INT),
        Column('content', TEXT),
    ], [])}

    def prepareAcquisition(self):
        # 'source' is mandatory; schedule its download before extraction.
        if 'source' not in self.config: raise hf.exceptions.ConfigError('source option not set!')
        self.source = hf.downloadService.addDownload(self.config['source'])
        self.status = 1.0
        self.details_db_value_list = []

    def extractData(self):
        """Parse the downloaded feed and collect one row per entry."""
        data = {'source_url': self.source.getSourceUrl(),
                'status': self.status}
        feed = modules.feedparser.parse(self.source.getTmpPath())
        data['title'] = feed.feed.title
        # Sort entries by date
        # NOTE(review): cmp/.sort(lambda x, y: ...) is Python 2-only syntax;
        # under Python 3 this raises and the bare except silently leaves the
        # feed in its original order.
        try:
            feed.entries.sort(lambda x,y: cmp(y.published_parsed,x.published_parsed))
        except:
            pass
        entries = 0
        for entry in feed.entries:
            # TODO Skip entries older than ndays
            details_db_values = {}
            details_db_values['author'] = ''
            details_db_values['title'] = entry.title
            details_db_values['link'] = entry.link
            # Convert published time to unix time integer
            try:
                details_db_values['published'] = int(mktime(entry.published_parsed))
            except:
                # Entries without a parseable publish date fall back to 0.
                details_db_values['published'] = 0
            details_db_values['content'] = entry.summary
            self.details_db_value_list.append(details_db_values)
            entries += 1
        # TODO only show n entries
        data['status'] = self.status
        return data

    def fillSubtables(self, parent_id):
        # Bulk-insert the rows collected by extractData for this dataset.
        self.subtables['feeds'].insert().execute([dict(parent_id=parent_id, **row) for row in self.details_db_value_list])

    def getTemplateData(self):
        """Return the template context, including the stored feed entries."""
        data = hf.module.ModuleBase.getTemplateData(self)
        info_list = self.subtables['feeds'].select().where(self.subtables['feeds'].c.parent_id==self.dataset['id']).execute().fetchall()
        data['feed_list'] = map(dict, info_list)
        return data
| {
"content_hash": "52860426678159c53143b88c31e10f25",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 136,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.5727752639517345,
"repo_name": "HappyFaceGoettingen/HappyFaceCore",
"id": "87f94f99aaf8d244d1bdb9bdcdb5493b9f3ce5a5",
"size": "2652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/RSSFeed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16086"
},
{
"name": "HTML",
"bytes": "64536"
},
{
"name": "JavaScript",
"bytes": "17341"
},
{
"name": "Python",
"bytes": "539101"
}
],
"symlink_target": ""
} |
"""
tmdbsimple.discover
~~~~~~~~~~~~~~~~~~~
This module implements the Discover functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2017 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class Discover(TMDB):
    """
    Discover functionality.

    See: http://docs.themoviedb.apiary.io/#discover
    """
    BASE_PATH = 'discover'
    URLS = {
        'movie': '/movie',
        'tv': '/tv',
    }

    @staticmethod
    def _rename_interval_kwargs(kwargs):
        """Rewrite '*_lte'/'*_gte' keyword names to the dotted API form, in place.

        Periods are not allowed in Python keyword arguments, but several API
        parameters contain them (e.g. 'vote_count.gte'), so callers pass
        'vote_count_gte' and it is translated here. See both usages in
        tests/test_discover.py.

        BUGFIX: iterate over a snapshot of the keys — the original code
        iterated the dict directly while popping and inserting keys, which is
        undefined behavior and can raise
        'RuntimeError: dictionary changed size during iteration'. Using
        elif also avoids a KeyError on a (pathological) name containing both
        '_lte' and '_gte', which the original would pop twice.
        """
        for param in list(kwargs):
            if '_lte' in param:
                kwargs[param.replace('_lte', '.lte')] = kwargs.pop(param)
            elif '_gte' in param:
                kwargs[param.replace('_gte', '.gte')] = kwargs.pop(param)

    def movie(self, **kwargs):
        """
        Discover movies by different types of data like average rating,
        number of votes, genres and certifications.

        Args:
            page: (optional) Minimum 1, maximum 1000.
            language: (optional) ISO 639-1 code.
            sort_by: (optional) Available options are 'vote_average.desc',
                     'vote_average.asc', 'release_date.desc',
                     'release_date.asc', 'popularity.desc', 'popularity.asc'.
            include_adult: (optional) Toggle the inclusion of adult titles.
                           Expected value is a boolean, True or False.
            year: (optional) Filter the results release dates to matches that
                  include this value. Expected value is a year.
            primary_release_year: (optional) Filter the results so that
                                  only the primary release date year has
                                  this value. Expected value is a year.
            vote_count.gte or vote_count_gte: (optional) Only include movies
                        that are equal to, or have a vote count higher
                        than this value. Expected value is an integer.
            vote_average.gte or vote_average_gte: (optional) Only include
                        movies that are equal to, or have a higher
                        average rating than this value. Expected value
                        is a float.
            with_genres: (optional) Only include movies with the specified
                         genres. Expected value is an integer (the id of
                         a genre). Multiple values can be specified.
                         Comma separated indicates an 'AND' query, while
                         a pipe (|) separated value indicates an 'OR'.
            release_date.gte or release_date_gte: (optional) The minimum
                        release to include. Expected format is
                        'YYYY-MM-DD'.
            release_date.lte or release_date_lte: (optional) The maximum
                        release to include. Expected format is
                        'YYYY-MM-DD'.
            certification_country: (optional) Only include movies with
                        certifications for a specific country. When
                        this value is specified, 'certification.lte'
                        is required. An ISO 3166-1 is expected.
            certification.lte or certification_lte: (optional) Only include
                        movies with this certification and lower.
                        Expected value is a valid certification for
                        the specified 'certification_country'.
            with_companies: (optional) Filter movies to include a specific
                            company. Expected value is an integer (the id
                            of a company). They can be comma separated
                            to indicate an 'AND' query.

        Returns:
            A dict representation of the JSON returned from the API.
        """
        self._rename_interval_kwargs(kwargs)

        path = self._get_path('movie')
        response = self._GET(path, kwargs)
        self._set_attrs_to_values(response)
        return response

    def tv(self, **kwargs):
        """
        Discover TV shows by different types of data like average rating,
        number of votes, genres, the network they aired on and air dates.

        Args:
            page: (optional) Minimum 1, maximum 1000.
            language: (optional) ISO 639-1 code.
            sort_by: (optional) Available options are 'vote_average.desc',
                     'vote_average.asc', 'first_air_date.desc',
                     'first_air_date.asc', 'popularity.desc', 'popularity.asc'
            first_air_year: (optional) Filter the results release dates to
                            matches that include this value. Expected value
                            is a year.
            vote_count.gte or vote_count_gte: (optional) Only include TV shows
                        that are equal to, or have vote count higher than this
                        value. Expected value is an integer.
            vote_average.gte or vote_average_gte: (optional) Only include TV
                        shows that are equal to, or have a higher average
                        rating than this value. Expected value is a float.
            with_genres: (optional) Only include TV shows with the specified
                         genres. Expected value is an integer (the id of a
                         genre). Multiple valued can be specified. Comma
                         separated indicates an 'AND' query, while a
                         pipe (|) separated value indicates an 'OR'.
            with_networks: (optional) Filter TV shows to include a specific
                           network. Expected value is an integer (the id of a
                           network). They can be comma separated to indicate
                           an 'AND' query.
            first_air_date.gte or first_air_date_gte: (optional) The minimum
                        release to include. Expected format is 'YYYY-MM-DD'.
            first_air_date.lte or first_air_date_lte: (optional) The maximum
                        release to include. Expected format is 'YYYY-MM-DD'.

        Returns:
            A dict representation of the JSON returned from the API.
        """
        self._rename_interval_kwargs(kwargs)

        path = self._get_path('tv')
        response = self._GET(path, kwargs)
        self._set_attrs_to_values(response)
        return response
| {
"content_hash": "bd74130a86d5dc4ebc718028ef798e8f",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 80,
"avg_line_length": 49.84827586206897,
"alnum_prop": 0.5301604869950194,
"repo_name": "coronary/RandomEpisode",
"id": "28ac8dd823908dfa4d483e33013f1b6543ae8aa0",
"size": "7253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "depends/Lib/site-packages/tmdbsimple/discover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1253"
},
{
"name": "C",
"bytes": "403433"
},
{
"name": "C++",
"bytes": "135910"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "4683953"
},
{
"name": "Tcl",
"bytes": "1285363"
}
],
"symlink_target": ""
} |
from ..world.location import Location
from learning import QLearner
class AI:
def __init__(self, learner):
self.learner = learner
self.rewards = []
self.actions = []
self.done = False
# keep track of which sheep are done
self.sheep_done = set([])
def setLearner(self, learner):
self.learner = learner
def getAction(self, state):
return self.learner.getAction(state)
def evaluate(self, old_state, action, new_state):
reward = self.computeReward(old_state, action, new_state)
self.rewards.append(reward)
self.learner.incorporateFeedback(old_state, action, reward, new_state)
def computeReward(self, state, action, new_state):
return 0.0
def reset(self):
''' resets rewards and actions '''
total_reward = sum(self.rewards)
self.rewards = []
self.actions = []
self.done = False
self.sheep_done = set([])
return total_reward
class GoTargetAI(AI):
    """Reward the dog for reaching the target location itself."""
    def computeReward(self, state, action, new_state):
        """Return 1.0 (and flag the episode done) once the dog is within
        the target radius; 0.0 otherwise."""
        x, y = new_state.own_location[0], new_state.own_location[1]
        # Wrap the raw coordinates in a Location to reuse its distance().
        dist = Location(x, y).distance(new_state.target_location)
        if dist >= new_state.target_radius:
            return 0.0
        self.done = True
        return 1.0
class HerdSheepAI(AI):
    """Reward herding behaviour: +1 for every sheep that enters the target."""
    def computeReward(self, state, action, new_state):
        """Award one point per sheep newly inside the target radius.

        Each sheep index is credited at most once per episode (tracked in
        ``self.sheep_done``); once every sheep has been credited the
        episode is flagged done.
        """
        # NOTE(review): sheep positions are read from the *old* state while
        # the target comes from the new one -- confirm this is intended.
        reward = 0
        for idx, sheep in enumerate(state.sheep_locations):
            # Wrap the raw coordinates in a Location to reuse its distance().
            dist = Location(sheep[0], sheep[1]).distance(new_state.target_location)
            inside = dist < new_state.target_radius
            if inside and idx not in self.sheep_done:
                self.sheep_done.add(idx)
                reward += 1
        if len(self.sheep_done) == len(state.sheep_locations):
            self.done = True
        return reward
| {
"content_hash": "cfca5fff79682eda1b8901d666ab3805",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 94,
"avg_line_length": 31.294871794871796,
"alnum_prop": 0.6042605489553462,
"repo_name": "schmit/sheepherding",
"id": "644f529799e0b49cc7f5fb3e7a4c13b04316ef9c",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sheepherding/ai/ai.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43915"
}
],
"symlink_target": ""
} |
import weakref
from automaton import machines
from taskflow import logging
from taskflow import states as st
from taskflow.types import failure
from taskflow.utils import iter_utils
# Default waiting state timeout (in seconds) used when waiting on
# outstanding atom futures.
WAITING_TIMEOUT = 60
# Meta states the state machine uses (internal; grouped in META_STATES).
UNDEFINED = 'UNDEFINED'
GAME_OVER = 'GAME_OVER'
META_STATES = (GAME_OVER, UNDEFINED)
# Event name constants the state machine uses.
SCHEDULE = 'schedule_next'
WAIT = 'wait_finished'
ANALYZE = 'examine_finished'
FINISH = 'completed'
FAILED = 'failed'
SUSPENDED = 'suspended'
SUCCESS = 'success'
REVERTED = 'reverted'
START = 'start'
# Module-level logger.
LOG = logging.getLogger(__name__)
class MachineMemory(object):
    """Scratch space shared by the state-machine reaction functions.

    Tracks which atoms are queued to run next, which futures are still
    outstanding or already finished, and any failures seen so far.
    """
    def __init__(self):
        # Atoms selected for scheduling on the next SCHEDULING pass.
        self.next_up = set()
        # Futures that have completed and await analysis.
        self.done = set()
        # Futures that are still running.
        self.not_done = set()
        # Failure objects accumulated while scheduling/completing atoms.
        self.failures = []
class MachineBuilder(object):
    """State machine *builder* that powers the engine components.

    NOTE(harlowja): the machine (states and events that will trigger
    transitions) that this builds is represented by the following
    table::

        +--------------+------------------+------------+----------+---------+
        | Start        | Event            | End        | On Enter | On Exit |
        +--------------+------------------+------------+----------+---------+
        | ANALYZING    | completed        | GAME_OVER  | .        | .       |
        | ANALYZING    | schedule_next    | SCHEDULING | .        | .       |
        | ANALYZING    | wait_finished    | WAITING    | .        | .       |
        | FAILURE[$]   | .                | .          | .        | .       |
        | GAME_OVER    | failed           | FAILURE    | .        | .       |
        | GAME_OVER    | reverted         | REVERTED   | .        | .       |
        | GAME_OVER    | success          | SUCCESS    | .        | .       |
        | GAME_OVER    | suspended        | SUSPENDED  | .        | .       |
        | RESUMING     | schedule_next    | SCHEDULING | .        | .       |
        | REVERTED[$]  | .                | .          | .        | .       |
        | SCHEDULING   | wait_finished    | WAITING    | .        | .       |
        | SUCCESS[$]   | .                | .          | .        | .       |
        | SUSPENDED[$] | .                | .          | .        | .       |
        | UNDEFINED[^] | start            | RESUMING   | .        | .       |
        | WAITING      | examine_finished | ANALYZING  | .        | .       |
        +--------------+------------------+------------+----------+---------+

    Between any of these yielded states (minus ``GAME_OVER`` and ``UNDEFINED``)
    if the engine has been suspended or the engine has failed (due to a
    non-resolveable task failure or scheduling failure) the machine will stop
    executing new tasks (currently running tasks will be allowed to complete)
    and this machines run loop will be broken.

    NOTE(harlowja): If the runtimes scheduler component is able to schedule
    tasks in parallel, this enables parallel running and/or reversion.
    """
    def __init__(self, runtime, waiter):
        # Weak proxy avoids creating a strong reference cycle between the
        # runtime (which owns this builder) and the builder itself.
        self._runtime = weakref.proxy(runtime)
        self._analyzer = runtime.analyzer
        self._completer = runtime.completer
        self._scheduler = runtime.scheduler
        self._storage = runtime.storage
        # Callable used to wait on outstanding futures; see build().
        self._waiter = waiter
    def build(self, timeout=None):
        """Builds a state-machine (that is used during running).

        Returns a ``(machine, memory)`` tuple where ``machine`` is a frozen
        ``automaton`` finite machine wired to the reaction functions below
        and ``memory`` is the fresh :class:`MachineMemory` those reaction
        functions operate on.

        :param timeout: seconds to wait for futures on each WAITING pass
                        (defaults to ``WAITING_TIMEOUT``).
        """
        memory = MachineMemory()
        if timeout is None:
            timeout = WAITING_TIMEOUT
        # Cache some local functions/methods...
        do_schedule = self._scheduler.schedule
        do_complete = self._completer.complete
        def is_runnable():
            # Checks if the storage says the flow is still runnable...
            return self._storage.get_flow_state() == st.RUNNING
        def iter_next_atoms(atom=None, apply_deciders=True):
            # Yields and filters and tweaks the next atoms to run...
            maybe_atoms_it = self._analyzer.iter_next_atoms(atom=atom)
            for atom, late_decider in maybe_atoms_it:
                if apply_deciders:
                    proceed = late_decider.check_and_affect(self._runtime)
                    if proceed:
                        yield atom
                else:
                    yield atom
        def resume(old_state, new_state, event):
            # This reaction function just updates the state machines memory
            # to include any nodes that need to be executed (from a previous
            # attempt, which may be empty if never ran before) and any nodes
            # that are now ready to be ran.
            memory.next_up.update(
                iter_utils.unique_seen(self._completer.resume(),
                                       iter_next_atoms()))
            return SCHEDULE
        def game_over(old_state, new_state, event):
            # This reaction function is mainly a intermediary delegation
            # function that analyzes the current memory and transitions to
            # the appropriate handler that will deal with the memory values,
            # it is *always* called before the final state is entered.
            if memory.failures:
                return FAILED
            leftover_atoms = iter_utils.count(
                # Avoid activating the deciders, since at this point
                # the engine is finishing and there will be no more further
                # work done anyway...
                iter_next_atoms(apply_deciders=False))
            if leftover_atoms:
                # Ok we didn't finish (either reverting or executing...) so
                # that means we must of been stopped at some point...
                LOG.blather("Suspension determined to have been reacted to"
                            " since (at least) %s atoms have been left in an"
                            " unfinished state", leftover_atoms)
                return SUSPENDED
            elif self._analyzer.is_success():
                return SUCCESS
            else:
                return REVERTED
        def schedule(old_state, new_state, event):
            # This reaction function starts to schedule the memory's next
            # nodes (iff the engine is still runnable, which it may not be
            # if the user of this engine has requested the engine/storage
            # that holds this information to stop or suspend); handles failures
            # that occur during this process safely...
            if is_runnable() and memory.next_up:
                not_done, failures = do_schedule(memory.next_up)
                if not_done:
                    memory.not_done.update(not_done)
                if failures:
                    memory.failures.extend(failures)
                memory.next_up.intersection_update(not_done)
            return WAIT
        def wait(old_state, new_state, event):
            # TODO(harlowja): maybe we should start doing 'yield from' this
            # call sometime in the future, or equivalent that will work in
            # py2 and py3.
            if memory.not_done:
                done, not_done = self._waiter(memory.not_done, timeout=timeout)
                memory.done.update(done)
                memory.not_done = not_done
            return ANALYZE
        def analyze(old_state, new_state, event):
            # This reaction function is responsible for analyzing all nodes
            # that have finished executing and completing them and figuring
            # out what nodes are now ready to be ran (and then triggering those
            # nodes to be scheduled in the future); handles failures that
            # occur during this process safely...
            next_up = set()
            while memory.done:
                fut = memory.done.pop()
                atom = fut.atom
                try:
                    outcome, result = fut.result()
                    retain = do_complete(atom, outcome, result)
                    if isinstance(result, failure.Failure):
                        if retain:
                            memory.failures.append(result)
                        else:
                            # NOTE(harlowja): avoid making any
                            # intention request to storage unless we are
                            # sure we are in DEBUG enabled logging (otherwise
                            # we will call this all the time even when DEBUG
                            # is not enabled, which would suck...)
                            if LOG.isEnabledFor(logging.DEBUG):
                                intention = self._storage.get_atom_intention(
                                    atom.name)
                                LOG.debug("Discarding failure '%s' (in"
                                          " response to outcome '%s') under"
                                          " completion units request during"
                                          " completion of atom '%s' (intention"
                                          " is to %s)", result, outcome,
                                          atom, intention)
                except Exception:
                    memory.failures.append(failure.Failure())
                else:
                    try:
                        more_work = set(iter_next_atoms(atom=atom))
                    except Exception:
                        memory.failures.append(failure.Failure())
                    else:
                        next_up.update(more_work)
            if is_runnable() and next_up and not memory.failures:
                memory.next_up.update(next_up)
                return SCHEDULE
            elif memory.not_done:
                return WAIT
            else:
                return FINISH
        def on_exit(old_state, event):
            LOG.debug("Exiting old state '%s' in response to event '%s'",
                      old_state, event)
        def on_enter(new_state, event):
            LOG.debug("Entering new state '%s' in response to event '%s'",
                      new_state, event)
        # NOTE(harlowja): when ran in blather mode it is quite useful
        # to track the various state transitions as they happen...
        watchers = {}
        if LOG.isEnabledFor(logging.BLATHER):
            watchers['on_exit'] = on_exit
            watchers['on_enter'] = on_enter
        m = machines.FiniteMachine()
        m.add_state(GAME_OVER, **watchers)
        m.add_state(UNDEFINED, **watchers)
        m.add_state(st.ANALYZING, **watchers)
        m.add_state(st.RESUMING, **watchers)
        m.add_state(st.REVERTED, terminal=True, **watchers)
        m.add_state(st.SCHEDULING, **watchers)
        m.add_state(st.SUCCESS, terminal=True, **watchers)
        m.add_state(st.SUSPENDED, terminal=True, **watchers)
        m.add_state(st.WAITING, **watchers)
        m.add_state(st.FAILURE, terminal=True, **watchers)
        m.default_start_state = UNDEFINED
        m.add_transition(GAME_OVER, st.REVERTED, REVERTED)
        m.add_transition(GAME_OVER, st.SUCCESS, SUCCESS)
        m.add_transition(GAME_OVER, st.SUSPENDED, SUSPENDED)
        m.add_transition(GAME_OVER, st.FAILURE, FAILED)
        m.add_transition(UNDEFINED, st.RESUMING, START)
        m.add_transition(st.ANALYZING, GAME_OVER, FINISH)
        m.add_transition(st.ANALYZING, st.SCHEDULING, SCHEDULE)
        m.add_transition(st.ANALYZING, st.WAITING, WAIT)
        m.add_transition(st.RESUMING, st.SCHEDULING, SCHEDULE)
        m.add_transition(st.SCHEDULING, st.WAITING, WAIT)
        m.add_transition(st.WAITING, st.ANALYZING, ANALYZE)
        m.add_reaction(GAME_OVER, FINISH, game_over)
        m.add_reaction(st.ANALYZING, ANALYZE, analyze)
        m.add_reaction(st.RESUMING, START, resume)
        m.add_reaction(st.SCHEDULING, SCHEDULE, schedule)
        m.add_reaction(st.WAITING, WAIT, wait)
        m.freeze()
        return (m, memory)
| {
"content_hash": "f8636ce879fbde94cc1fe32fa7d6100d",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 79,
"avg_line_length": 44.9812734082397,
"alnum_prop": 0.5293921731890091,
"repo_name": "pombredanne/taskflow-1",
"id": "9013cd8a917f532d13598a40bbc2685e97b2606b",
"size": "12667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskflow/engines/action_engine/builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1484277"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
} |
"Makes working with XML feel like you are working with JSON"
try:
from defusedexpat import pyexpat as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised internally when an item callback asks parsing to stop."""
class _DictSAXHandler(object):
    """SAX-style handler that incrementally builds a dict from XML events.

    ``parse`` wires instances of this class up to an expat parser; the
    resulting document (or streamed items, when ``item_depth`` > 0) is
    accumulated in ``self.item``.
    """
    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True,
                 namespace_separator=':',
                 namespaces=None,
                 force_list=None):
        # Current element path as (name, attrs-or-None) pairs.
        self.path = []
        # Saved (item, data) pairs for enclosing elements.
        self.stack = []
        # Text fragments collected for the current element.
        self.data = []
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace
        self.namespace_separator = namespace_separator
        self.namespaces = namespaces
        # xmlns declarations seen since the last startElement.
        self.namespace_declarations = OrderedDict()
        self.force_list = force_list
    def _build_name(self, full_name):
        """Shorten ``full_name``'s namespace using the ``namespaces`` map."""
        if not self.namespaces:
            return full_name
        i = full_name.rfind(self.namespace_separator)
        if i == -1:
            return full_name
        namespace, name = full_name[:i], full_name[i + 1:]
        short_namespace = self.namespaces.get(namespace, namespace)
        if not short_namespace:
            return name
        else:
            return self.namespace_separator.join((short_namespace, name))
    def _attrs_to_dict(self, attrs):
        """Normalize expat's flat [k1, v1, k2, v2, ...] attr list to a dict."""
        if isinstance(attrs, dict):
            return attrs
        return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
    def startNamespaceDecl(self, prefix, uri):
        self.namespace_declarations[prefix or ''] = uri
    def startElement(self, full_name, attrs):
        name = self._build_name(full_name)
        attrs = self._attrs_to_dict(attrs)
        if attrs and self.namespace_declarations:
            # Attach pending xmlns declarations to this element's attrs.
            attrs['xmlns'] = self.namespace_declarations
            self.namespace_declarations = OrderedDict()
        self.path.append((name, attrs or None))
        if len(self.path) > self.item_depth:
            self.stack.append((self.item, self.data))
            if self.xml_attribs:
                attr_entries = []
                for key, value in attrs.items():
                    key = self.attr_prefix + self._build_name(key)
                    if self.postprocessor:
                        entry = self.postprocessor(self.path, key, value)
                    else:
                        entry = (key, value)
                    if entry:
                        attr_entries.append(entry)
                attrs = self.dict_constructor(attr_entries)
            else:
                attrs = None
            self.item = attrs or None
            self.data = []
    def endElement(self, full_name):
        name = self._build_name(full_name)
        if len(self.path) == self.item_depth:
            # Streaming mode: hand the finished item to the callback.
            item = self.item
            if item is None:
                item = (None if not self.data
                        else self.cdata_separator.join(self.data))
            should_continue = self.item_callback(self.path, item)
            if not should_continue:
                raise ParsingInterrupted()
        if len(self.stack):
            data = (None if not self.data
                    else self.cdata_separator.join(self.data))
            item = self.item
            self.item, self.data = self.stack.pop()
            if self.strip_whitespace and data:
                data = data.strip() or None
            if data and self.force_cdata and item is None:
                item = self.dict_constructor()
            if item is not None:
                if data:
                    self.push_data(item, self.cdata_key, data)
                self.item = self.push_data(self.item, name, item)
            else:
                self.item = self.push_data(self.item, name, data)
        else:
            self.item = None
            self.data = []
        self.path.pop()
    def characters(self, data):
        if not self.data:
            self.data = [data]
        else:
            self.data.append(data)
    def push_data(self, item, key, data):
        """Insert ``data`` under ``key`` in ``item``; repeats become lists."""
        if self.postprocessor is not None:
            result = self.postprocessor(self.path, key, data)
            if result is None:
                return item
            key, data = result
        if item is None:
            item = self.dict_constructor()
        try:
            value = item[key]
            if isinstance(value, list):
                value.append(data)
            else:
                item[key] = [value, data]
        except KeyError:
            if self._should_force_list(key, data):
                item[key] = [data]
            else:
                item[key] = data
        return item
    def _should_force_list(self, key, value):
        """True if ``force_list`` (a container or callable) requests a list."""
        if not self.force_list:
            return False
        try:
            return key in self.force_list
        except TypeError:
            # force_list is a callable, not a container.
            return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
          namespace_separator=':', disable_entities=True, **kwargs):
    """Parse the given XML input and convert it into a dictionary.
    `xml_input` can either be a `string` or a file-like object.
    If `xml_attribs` is `True`, element attributes are put in the dictionary
    among regular child elements, using `@` as a prefix to avoid collisions. If
    set to `False`, they are just ignored.
    Simple example::
        >>> import xmltodict
        >>> doc = xmltodict.parse(\"\"\"
        ... <a prop="x">
        ... <b>1</b>
        ... <b>2</b>
        ... </a>
        ... \"\"\")
        >>> doc['a']['@prop']
        u'x'
        >>> doc['a']['b']
        [u'1', u'2']
    If `item_depth` is `0`, the function returns a dictionary for the root
    element (default behavior). Otherwise, it calls `item_callback` every time
    an item at the specified depth is found and returns `None` in the end
    (streaming mode).
    The callback function receives two parameters: the `path` from the document
    root to the item (name-attribs pairs), and the `item` (dict). If the
    callback's return value is false-ish, parsing will be stopped with the
    :class:`ParsingInterrupted` exception.
    Streaming example::
        >>> def handle(path, item):
        ...     print('path:%s item:%s' % (path, item))
        ...     return True
        ...
        >>> xmltodict.parse(\"\"\"
        ... <a prop="x">
        ... <b>1</b>
        ... <b>2</b>
        ... </a>\"\"\", item_depth=2, item_callback=handle)
        path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
        path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2
    The optional argument `postprocessor` is a function that takes `path`,
    `key` and `value` as positional arguments and returns a new `(key, value)`
    pair where both `key` and `value` may have changed. Usage example::
        >>> def postprocessor(path, key, value):
        ...     try:
        ...         return key + ':int', int(value)
        ...     except (ValueError, TypeError):
        ...         return key, value
        >>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
        ...                 postprocessor=postprocessor)
        OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])
    You can pass an alternate version of `expat` (such as `defusedexpat`) by
    using the `expat` parameter. E.g:
        >>> import defusedexpat
        >>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
        OrderedDict([(u'a', u'hello')])
    You can use the force_list argument to force lists to be created even
    when there is only a single child of a given level of hierarchy. The
    force_list argument is a tuple of keys. If the key for a given level
    of hierarchy is in the force_list argument, that level of hierarchy
    will have a list as a child (even if there is only one sub-element).
    The index_keys operation takes precedence over this. This is applied
    after any user-supplied postprocessor has already run.
    For example, given this input:
    <servers>
      <server>
        <name>host1</name>
        <os>Linux</os>
        <interfaces>
          <interface>
            <name>em0</name>
            <ip_address>10.0.0.1</ip_address>
          </interface>
        </interfaces>
      </server>
    </servers>
    If called with force_list=('interface',), it will produce
    this dictionary:
    {'servers':
      {'server':
        {'name': 'host1',
         'os': 'Linux'},
         'interfaces':
          {'interface':
            [ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } }
    `force_list` can also be a callable that receives `path`, `key` and
    `value`. This is helpful in cases where the logic that decides whether
    a list should be forced is more complex.
    """
    handler = _DictSAXHandler(namespace_separator=namespace_separator,
                              **kwargs)
    # Encode text input ourselves so expat sees bytes in a known encoding.
    if isinstance(xml_input, _unicode):
        if not encoding:
            encoding = 'utf-8'
        xml_input = xml_input.encode(encoding)
    # Passing None as the separator disables expat namespace processing.
    if not process_namespaces:
        namespace_separator = None
    parser = expat.ParserCreate(
        encoding,
        namespace_separator
    )
    try:
        parser.ordered_attributes = True
    except AttributeError:
        # Jython's expat does not support ordered_attributes
        pass
    parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
    parser.StartElementHandler = handler.startElement
    parser.EndElementHandler = handler.endElement
    parser.CharacterDataHandler = handler.characters
    parser.buffer_text = True
    if disable_entities:
        try:
            # Attempt to disable DTD in Jython's expat parser (Xerces-J).
            feature = "http://apache.org/xml/features/disallow-doctype-decl"
            parser._reader.setFeature(feature, True)
        except AttributeError:
            # For CPython / expat parser.
            # Anything not handled ends up here and entities aren't expanded.
            parser.DefaultHandler = lambda x: None
            # Expects an integer return; zero means failure -> expat.ExpatError.
            parser.ExternalEntityRefHandler = lambda *x: 1
    if hasattr(xml_input, 'read'):
        parser.ParseFile(xml_input)
    else:
        parser.Parse(xml_input, True)
    return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
          attr_prefix='@',
          cdata_key='#text',
          depth=0,
          preprocessor=None,
          pretty=False,
          newl='\n',
          indent='\t',
          namespace_separator=':',
          namespaces=None,
          full_document=True):
    """Recursively emit SAX events for ``(key, value)`` on *content_handler*.

    Helper for :func:`unparse`. ``value`` may be a scalar, a dict, or a
    list of those; dict keys starting with *attr_prefix* become element
    attributes and *cdata_key* entries become character data.
    """
    key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
    if preprocessor is not None:
        result = preprocessor(key, value)
        if result is None:
            # Preprocessor vetoed this key/value pair entirely.
            return
        key, value = result
    # Normalize scalars/dicts to a one-element list so one loop handles all.
    if (not hasattr(value, '__iter__') or
            isinstance(value, _basestring) or
            isinstance(value, dict)):
        value = [value]
    for index, v in enumerate(value):
        if full_document and depth == 0 and index > 0:
            raise ValueError('document with multiple roots')
        if v is None:
            v = OrderedDict()
        elif not isinstance(v, dict):
            v = _unicode(v)
        if isinstance(v, _basestring):
            v = OrderedDict(((cdata_key, v),))
        cdata = None
        attrs = OrderedDict()
        children = []
        for ik, iv in v.items():
            if ik == cdata_key:
                cdata = iv
                continue
            if ik.startswith(attr_prefix):
                ik = _process_namespace(ik, namespaces, namespace_separator,
                                        attr_prefix)
                if ik == '@xmlns' and isinstance(iv, dict):
                    # Expand an '@xmlns' dict into xmlns / xmlns:prefix attrs.
                    for x, y in iv.items():
                        attr = 'xmlns{0}'.format(':{0}'.format(x) if x else '')
                        attrs[attr] = _unicode(y)
                    continue
                if not isinstance(iv, _unicode):
                    iv = _unicode(iv)
                attrs[ik[len(attr_prefix):]] = iv
                continue
            children.append((ik, iv))
        if pretty:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.startElement(key, AttributesImpl(attrs))
        if pretty and children:
            content_handler.ignorableWhitespace(newl)
        for child_key, child_value in children:
            _emit(child_key, child_value, content_handler,
                  attr_prefix, cdata_key, depth + 1, preprocessor,
                  pretty, newl, indent, namespaces=namespaces,
                  namespace_separator=namespace_separator)
        if cdata is not None:
            content_handler.characters(cdata)
        if pretty and children:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.endElement(key)
        if pretty and depth:
            content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
            short_empty_elements=False,
            **kwargs):
    """Emit an XML document for the given `input_dict` (reverse of `parse`).

    The document is returned as a string unless `output` (a file-like
    object) is given, in which case it is written there instead and
    nothing is returned.

    Keys prefixed with `attr_prefix` (default `'@'`) become XML node
    attributes; a key equal to `cdata_key` (default `'#text'`) becomes
    character data. Pass `pretty=True` for pretty-printing; line endings
    and indentation are customizable via `newl` and `indent`.

    Raises ValueError when `full_document` is true and `input_dict` does
    not have exactly one root key.
    """
    if full_document and len(input_dict) != 1:
        raise ValueError('Document must have exactly one root.')
    # Write into the caller's stream when given, else an in-memory buffer.
    sink = output if output is not None else StringIO()
    if short_empty_elements:
        handler = XMLGenerator(sink, encoding, True)
    else:
        handler = XMLGenerator(sink, encoding)
    if full_document:
        handler.startDocument()
    for root_key, root_value in input_dict.items():
        _emit(root_key, root_value, handler, full_document=full_document,
              **kwargs)
    if full_document:
        handler.endDocument()
    if output is not None:
        return None
    result = sink.getvalue()
    try:  # pragma no cover
        result = result.decode(encoding)
    except AttributeError:  # pragma no cover
        pass
    return result
| {
"content_hash": "c5b2bf5f59a6decf8b12e8c0460aa272",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 80,
"avg_line_length": 36.44638949671772,
"alnum_prop": 0.5665826128722382,
"repo_name": "transtats/transtats",
"id": "08d8faff2b9df7b555cd13ac89262b327a6364b3",
"size": "16678",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "dashboard/converters/xml2dict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7771"
},
{
"name": "Dockerfile",
"bytes": "4471"
},
{
"name": "HTML",
"bytes": "319854"
},
{
"name": "JavaScript",
"bytes": "5529"
},
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "751517"
},
{
"name": "Shell",
"bytes": "7337"
}
],
"symlink_target": ""
} |
import errno
import logging
import os
import re
import shutil
import tempfile
import threading
import time
from devil.android import decorators
from devil.android import device_errors
from devil.android.sdk import adb_wrapper
from devil.utils import reraiser_thread
logger = logging.getLogger(__name__)
class LogcatMonitor(object):
  """Records a device's logcat output in a background thread.

  Recorded output can be searched with |WaitFor| / |FindAll| and is
  optionally copied to |output_file| on |Stop|. Call |Close| (or use the
  instance as a context manager) to clean up the temporary record file.
  """
  # Timeout (seconds) for each logcat iteration so the record thread can
  # notice the stop event even when no new output arrives.
  _RECORD_ITER_TIMEOUT = 0.2
  # How long |_StopRecording| waits when joining the record thread.
  _RECORD_THREAD_JOIN_WAIT = 5.0
  # Polling interval used by |WaitFor| while tailing the record file.
  _WAIT_TIME = 0.2
  # 'threadtime'-format logcat line, parameterized with the pid/tid/level/
  # component/message patterns supplied by |FindAll|.
  _THREADTIME_RE_FORMAT = (
      r'(?P<date>\S*) +(?P<time>\S*) +(?P<proc_id>%s) +(?P<thread_id>%s) +'
      r'(?P<log_level>%s) +(?P<component>%s) *: +(?P<message>%s)$')
  def __init__(self, adb, clear=True, filter_specs=None, output_file=None):
    """Create a LogcatMonitor instance.

    Args:
      adb: An instance of adb_wrapper.AdbWrapper.
      clear: If True, clear the logcat when monitoring starts.
      filter_specs: An optional list of '<tag>[:priority]' strings.
      output_file: File path to save recorded logcat.

    Raises:
      ValueError: If |adb| is not an adb_wrapper.AdbWrapper instance.
    """
    if isinstance(adb, adb_wrapper.AdbWrapper):
      self._adb = adb
    else:
      raise ValueError('Unsupported type passed for argument "device"')
    self._clear = clear
    self._filter_specs = filter_specs
    self._output_file = output_file
    self._record_file = None
    # Guards |_record_file| between the record thread and the main thread.
    self._record_file_lock = threading.Lock()
    self._record_thread = None
    self._stop_recording_event = threading.Event()
  @property
  def output_file(self):
    return self._output_file
  @decorators.WithTimeoutAndRetriesDefaults(10, 0)
  def WaitFor(self, success_regex, failure_regex=None, timeout=None,
              retries=None):
    """Wait for a matching logcat line or until a timeout occurs.

    This will attempt to match lines in the logcat against both |success_regex|
    and |failure_regex| (if provided). Note that this calls re.search on each
    logcat line, not re.match, so the provided regular expressions don't have
    to match an entire line.

    Args:
      success_regex: The regular expression to search for.
      failure_regex: An optional regular expression that, if hit, causes this
        to stop looking for a match. Can be None.
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      A match object if |success_regex| matches a part of a logcat line, or
      None if |failure_regex| matches a part of a logcat line.

    Raises:
      CommandFailedError on logcat failure (NOT on a |failure_regex| match).
      CommandTimeoutError if no logcat line matching either |success_regex| or
        |failure_regex| is found in |timeout| seconds.
      DeviceUnreachableError if the device becomes unreachable.
      LogcatMonitorCommandError when calling |WaitFor| while not recording
        logcat.
    """
    if self._record_thread is None:
      raise LogcatMonitorCommandError(
          'Must be recording logcat when calling |WaitFor|',
          device_serial=str(self._adb))
    if isinstance(success_regex, basestring):  # NOTE: Python 2 `basestring`.
      success_regex = re.compile(success_regex)
    if isinstance(failure_regex, basestring):
      failure_regex = re.compile(failure_regex)
    logger.debug('Waiting %d seconds for "%s"', timeout, success_regex.pattern)
    # NOTE This will continue looping until:
    # - success_regex matches a line, in which case the match object is
    #   returned.
    # - failure_regex matches a line, in which case None is returned
    # - the timeout is hit, in which case a CommandTimeoutError is raised.
    with open(self._record_file.name, 'r') as f:
      while True:
        line = f.readline()
        if line:
          m = success_regex.search(line)
          if m:
            return m
          if failure_regex and failure_regex.search(line):
            return None
        else:
          # Reached the current tail; wait for the record thread to append.
          time.sleep(self._WAIT_TIME)
  def FindAll(self, message_regex, proc_id=None, thread_id=None, log_level=None,
              component=None):
    """Finds all lines in the logcat that match the provided constraints.

    Args:
      message_regex: The regular expression that the <message> section must
        match.
      proc_id: The process ID to match. If None, matches any process ID.
      thread_id: The thread ID to match. If None, matches any thread ID.
      log_level: The log level to match. If None, matches any log level.
      component: The component to match. If None, matches any component.

    Raises:
      LogcatMonitorCommandError when calling |FindAll| before recording logcat.

    Yields:
      A match object for each matching line in the logcat. The match object
      will always contain, in addition to groups defined in |message_regex|,
      the following named groups: 'date', 'time', 'proc_id', 'thread_id',
      'log_level', 'component', and 'message'.
    """
    if self._record_file is None:
      raise LogcatMonitorCommandError(
          'Must have recorded or be recording a logcat to call |FindAll|',
          device_serial=str(self._adb))
    if proc_id is None:
      proc_id = r'\d+'
    if thread_id is None:
      thread_id = r'\d+'
    if log_level is None:
      log_level = r'[VDIWEF]'
    if component is None:
      component = r'[^\s:]+'
    # pylint: disable=protected-access
    threadtime_re = re.compile(
        type(self)._THREADTIME_RE_FORMAT % (
            proc_id, thread_id, log_level, component, message_regex))
    with open(self._record_file.name, 'r') as f:
      for line in f:
        m = re.match(threadtime_re, line)
        if m:
          yield m
  def _StartRecording(self):
    """Starts recording logcat to file.

    Function spawns a thread that records logcat to file and will not die
    until |StopRecording| is called.
    """
    def record_to_file():
      # Write the log with line buffering so the consumer sees each individual
      # line.
      for data in self._adb.Logcat(filter_specs=self._filter_specs,
                                   logcat_format='threadtime',
                                   iter_timeout=self._RECORD_ITER_TIMEOUT):
        if self._stop_recording_event.isSet():
          return
        if data is None:
          # Logcat can yield None if the iter_timeout is hit.
          continue
        with self._record_file_lock:
          if self._record_file and not self._record_file.closed:
            self._record_file.write(data + '\n')
    self._stop_recording_event.clear()
    if not self._record_thread:
      self._record_thread = reraiser_thread.ReraiserThread(record_to_file)
      self._record_thread.start()
  def _StopRecording(self):
    """Finish recording logcat."""
    if self._record_thread:
      self._stop_recording_event.set()
      self._record_thread.join(timeout=self._RECORD_THREAD_JOIN_WAIT)
      # Surface any exception raised inside the record thread.
      self._record_thread.ReraiseIfException()
      self._record_thread = None
  def Start(self):
    """Starts the logcat monitor.

    Clears the logcat if |clear| was set in |__init__|.
    """
    if self._clear:
      self._adb.Logcat(clear=True)
    if not self._record_file:
      # Line-buffered (bufsize=1) so readers see each line promptly.
      self._record_file = tempfile.NamedTemporaryFile(mode='a', bufsize=1)
    self._StartRecording()
  def Stop(self):
    """Stops the logcat monitor.

    Stops recording the logcat. Copies currently recorded logcat to
    |self._output_file|.
    """
    self._StopRecording()
    with self._record_file_lock:
      if self._record_file and self._output_file:
        try:
          os.makedirs(os.path.dirname(self._output_file))
        except OSError as e:
          # Only ignore 'directory already exists'; re-raise anything else.
          if e.errno != errno.EEXIST:
            raise
        shutil.copy(self._record_file.name, self._output_file)
  def Close(self):
    """Closes logcat recording file.

    Should be called when finished using the logcat monitor.
    """
    with self._record_file_lock:
      if self._record_file:
        self._record_file.close()
        self._record_file = None
  def __enter__(self):
    """Starts the logcat monitor."""
    self.Start()
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    """Stops the logcat monitor."""
    self.Stop()
  def __del__(self):
    """Closes logcat recording file in case |Close| was never called."""
    with self._record_file_lock:
      if self._record_file:
        logger.warning(
            'Need to call |Close| on the logcat monitor when done!')
        self._record_file.close()
  @property
  def adb(self):
    return self._adb
class LogcatMonitorCommandError(device_errors.CommandFailedError):
  """Exception for errors with logcat monitor commands."""
| {
"content_hash": "81d7fe96dbfe43bd7b62c191ed9d750b",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 80,
"avg_line_length": 34.06024096385542,
"alnum_prop": 0.647329324372126,
"repo_name": "catapult-project/catapult-csm",
"id": "571662f95293913b1d0dde65e0c57d31e1d9d49e",
"size": "8679",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "devil/devil/android/logcat_monitor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
} |
import numpy
from rpyc.core import netref
def apply_pathes():
    """Monkey-patch rpyc's netref method factory for numpy interop.

    Replaces ``netref._make_method`` so that a remote ``__array__`` call
    pickles the remote array and rebuilds a real local array instead of
    handing back a netref proxy.  (The public name keeps the historical
    "pathes" typo because callers import it under this name.)
    """
    original_make_method = netref._make_method

    def patched_make_method(name, doc):
        if name != "__array__":
            return original_make_method(name, doc)

        def __array__(self, dtype=None):
            # Pickling like this only works between python interpreters
            # of the same version.
            payload = netref.syncreq(
                self,
                netref.consts.HANDLE_PICKLE,
                netref.pickle.HIGHEST_PROTOCOL,
            )
            result = netref.pickle.loads(payload)
            if dtype is not None:
                result = numpy.asarray(result, dtype=dtype)
            return result

        __array__.__doc__ = doc
        return __array__

    netref._make_method = patched_make_method
| {
"content_hash": "b3694d017f9ef4e92a686d7b92bdd6a4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 69,
"avg_line_length": 29.413793103448278,
"alnum_prop": 0.4771395076201641,
"repo_name": "modin-project/modin",
"id": "13d33e6652e1b5d083ac2ef80c6a6d56ab31e5b1",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modin/experimental/cloud/rpyc_patches.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2330"
},
{
"name": "Python",
"bytes": "3914783"
},
{
"name": "Shell",
"bytes": "2377"
}
],
"symlink_target": ""
} |
from functools import wraps
from elasticsearch import ElasticsearchException
from statsd import statsd
def es_error_statsd(fun):
    """Decorator that counts Elasticsearch failures in statsd.

    Increments the ``elasticsearch.error`` counter whenever the wrapped
    callable raises an ``ElasticsearchException``, then re-raises it.

    .. Note::

       This has to be the inner-most decorator so that it can see the
       Elasticsearch error, do its statsd thing and re-raise it for
       error handling.

    """
    @wraps(fun)
    def _es_error_statsd(*args, **kwargs):
        try:
            result = fun(*args, **kwargs)
        except ElasticsearchException:
            statsd.incr('elasticsearch.error')
            raise
        return result
    return _es_error_statsd
def to_class_path(cls):
    """Return the class path ("module:ClassName") for a class.

    The result can be reversed later to get the class back using
    ``from_class_path``.

    :returns: string

    >>> from fjord.search.models import Record
    >>> to_class_path(Record)
    'fjord.search.models:Record'

    """
    return '%s:%s' % (cls.__module__, cls.__name__)
def from_class_path(cls_path):
    """Resolve a "module:ClassName" string (as built by ``to_class_path``)
    back to the class it names.

    :returns: varies

    >>> from_class_path('fjord.search.models:Record')
    <Record ...>

    """
    module_path, cls_name = cls_path.split(':')
    return getattr(__import__(module_path, fromlist=[cls_name]), cls_name)
| {
"content_hash": "33aea7aeefa345586fca6c732e3d860b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 69,
"avg_line_length": 24.189655172413794,
"alnum_prop": 0.639344262295082,
"repo_name": "rlr/fjord",
"id": "67d4f2e3d3fd03736699979ec945303770f33b9f",
"size": "1403",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fjord/search/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "159723"
},
{
"name": "HTML",
"bytes": "133287"
},
{
"name": "JavaScript",
"bytes": "304461"
},
{
"name": "Python",
"bytes": "823931"
},
{
"name": "Shell",
"bytes": "11741"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
from flask import redirect, url_for, request, session, make_response
import sqlite3, os
# Flask application object; app.root_path is used below to locate "test.db".
app = Flask(__name__)
@app.route('/')
def index():
    # Landing page. (Presumably holds the form that posts to /form/ —
    # verify against templates/index.html.)
    return render_template('index.html')
@app.route('/form/', methods=['POST'])
def form():
    """Insert the posted name/email into the test table, then confirm.

    Uses a parameterized query, so the user input cannot inject SQL.
    """
    db = sqlite3.connect(os.path.join(app.root_path, "test.db"))
    try:
        db.execute('insert into test values(?,?)',
                   [request.form['name'], request.form['email']])
        db.commit()
    finally:
        # The original leaked one sqlite connection per request.
        db.close()
    return render_template('show.html')
@app.route('/show/')
def show():
    """Render every row stored in the test table."""
    db = sqlite3.connect(os.path.join(app.root_path, "test.db"))
    try:
        cur = db.execute('select * from test')
        info = cur.fetchall()
    finally:
        # The original leaked one sqlite connection per request.
        db.close()
    return render_template('info.html', info=info)
if __name__ == '__main__':
    # Flask development server only; debug=True must be off in production.
    app.run(debug=True)
| {
"content_hash": "329c2864bf6debf49abb6999f6372691",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 92,
"avg_line_length": 26.344827586206897,
"alnum_prop": 0.6465968586387435,
"repo_name": "smdsbz/homework",
"id": "3fdf81bb6ce1cbfbd89e8d102f6f4c1aceaec363",
"size": "764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/learnFlask/back-end_task_3/back-end_task_3/sample/hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229446"
},
{
"name": "C++",
"bytes": "55207"
},
{
"name": "CMake",
"bytes": "21186"
},
{
"name": "CSS",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "56"
},
{
"name": "Makefile",
"bytes": "14225"
},
{
"name": "Python",
"bytes": "13686"
}
],
"symlink_target": ""
} |
import glfw
import OpenGL.GL.shaders
import numpy as np
from OpenGL.GL import *
def _build_shader_program():
    """Compile the vertex/fragment shader pair and link them into a program."""
    vertex_shader = """
    #version 330
    in vec3 position;
    in vec3 color;
    out vec3 newColor;
    void main()
    {
        gl_Position = vec4(position, 1.0f);
        newColor = color;
    }
    """
    fragment_shader = """
    #version 330
    in vec3 newColor;
    out vec4 outColor;
    void main()
    {
        outColor = vec4(newColor, 1.0f);
    }
    """
    return OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(vertex_shader, GL_VERTEX_SHADER),
        OpenGL.GL.shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER)
    )


def _upload_triangle(shader):
    """Upload one interleaved position+color triangle to a VBO and wire up
    the 'position' and 'color' vertex attributes of |shader|."""
    # Explicit import: the original relied on ``from OpenGL.GL import *``
    # leaking the ctypes module into this namespace.
    import ctypes
    # Interleaved layout per vertex: x, y, z, r, g, b
    triangle = np.array([
        -0.5, -0.5, 0.0, 1.0, 0.0, 0.0,
        0.5, -0.5, 0.0, 0.0, 1.0, 0.0,
        0.0, 0.5, 0.0, 0.0, 0.0, 1.0
    ], dtype=np.float32)
    # Derive byte sizes from the array instead of hard-coding 18 * 4 / 24 / 12.
    stride = 6 * triangle.itemsize  # 6 floats per vertex, 4 bytes each
    vbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    glBufferData(GL_ARRAY_BUFFER, triangle.nbytes, triangle, GL_STATIC_DRAW)
    position = glGetAttribLocation(shader, "position")
    glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, stride,
                          ctypes.c_void_p(0))
    glEnableVertexAttribArray(position)
    color = glGetAttribLocation(shader, "color")
    # Color starts after the three 4-byte position floats (byte offset 12).
    glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, stride,
                          ctypes.c_void_p(3 * triangle.itemsize))
    glEnableVertexAttribArray(color)


def main():
    """Open an 800x600 GLFW window and draw one per-vertex-colored triangle."""
    # initalize glfw
    if not glfw.init():
        return
    window = glfw.create_window(800, 600, "My OpenGL Window", None, None)
    if not window:
        glfw.terminate()
        return
    glfw.make_context_current(window)
    shader = _build_shader_program()
    _upload_triangle(shader)
    glUseProgram(shader)
    glClearColor(0.2, 0.3, 0.2, 1.0)
    while not glfw.window_should_close(window):
        glfw.poll_events()
        glClear(GL_COLOR_BUFFER_BIT)
        glDrawArrays(GL_TRIANGLES, 0, 3)  # 3 vertices
        glfw.swap_buffers(window)
    glfw.terminate()
if __name__ == "__main__":
    # Run the demo only when executed directly, not on import.
    main()
| {
"content_hash": "37595b6f67e8c9935a5cc67c66ed9973",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 121,
"avg_line_length": 24.04494382022472,
"alnum_prop": 0.6158878504672897,
"repo_name": "quanhua92/learning-notes",
"id": "394296c42a63378aa34b1597f1fecea4db5dfcea",
"size": "2140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/pyopengl/02_coloring_each_vertex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1491"
},
{
"name": "C++",
"bytes": "578330"
},
{
"name": "CMake",
"bytes": "2988"
},
{
"name": "CSS",
"bytes": "63793"
},
{
"name": "HTML",
"bytes": "135800"
},
{
"name": "Java",
"bytes": "47446"
},
{
"name": "JavaScript",
"bytes": "14704"
},
{
"name": "Jupyter Notebook",
"bytes": "5373459"
},
{
"name": "Python",
"bytes": "166227"
},
{
"name": "QMake",
"bytes": "16168"
},
{
"name": "XSLT",
"bytes": "7770"
}
],
"symlink_target": ""
} |
import scrapy
class BfscrapsItem(scrapy.Item):
    """Scrapy item carrying a single scraped ``name`` value."""
    name = scrapy.Field()
| {
"content_hash": "384a89bd89039bdba1b865bdfd04f7fc",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 19.285714285714285,
"alnum_prop": 0.6814814814814815,
"repo_name": "mattgiguere/doglodge",
"id": "9c9490e8dff9882b632195f8203b7dd7fdeecbd9",
"size": "287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code/bfscraps/bfscraps/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115955"
},
{
"name": "HTML",
"bytes": "97612"
},
{
"name": "Python",
"bytes": "94324"
}
],
"symlink_target": ""
} |
# Sphinx documentation build configuration for the gridmap project.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [#'matplotlib.sphinxext.mathmpl',
              #'matplotlib.sphinxext.only_directives',
              #'matplotlib.sphinxext.plot_directive',
              'sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.mathjax',
              #'ipython_console_highlighting',
              #'numpydoc',
              ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gridmap'
copyright = u'2012, Bjørn Ådlandsvik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# NOTE: ISO date format (e.g. 2012-06-01) for the "last updated" stamp.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gridmapdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
'papersize': 'a4paper'
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'gridmap.tex', u'Gridmap Documentation',
   u'Bjørn Ådlandsvik\\\ Institute of Marine Research', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# NOTE: explicitly disabled here, overriding Sphinx's default of True.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gridmap', u'Gridmap Documentation',
     [u'Bjørn Ådlandsvik'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'gridmap', u'gridmap Documentation',
   u'Bjørn Ådlandsvik', 'gridmap', 'One line description of project.',
   'Miscellaneous')]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "1a23e848f3eb84e90cc2f02a13788ad6",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 80,
"avg_line_length": 32.34166666666667,
"alnum_prop": 0.693635660912136,
"repo_name": "bjornaa/gridmap",
"id": "7a688dbfcdbfab08ad8490d767d7fb78155f2611",
"size": "8188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47782"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
} |
from darwinpush.xb.raw.binding_ import *
| {
"content_hash": "9c0cd11a22c3936b42309c4f594e5b48",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 41,
"alnum_prop": 0.7804878048780488,
"repo_name": "HackTrain/darwinpush",
"id": "9f5e6c22c18ceb3ca1e5ec7db55ddb0cec7ba081",
"size": "65",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "darwinpush/xb/binding_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "573058"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
#     ("Content", ("pages.Page", "blog.BlogPost",
#        "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
#     ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
#     ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
#     ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
#     ("comment_tags.recent_comments",),
#     ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
#     (1, "Top navigation bar", "pages/menus/dropdown.html"),
#     (2, "Left-hand tree", "pages/menus/tree.html"),
#     (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
#     (
#         # Dotted path to field.
#         "mezzanine.blog.models.BlogPost.image",
#         # Dotted path to field class.
#         "somelib.fields.ImageField",
#         # Positional args for field class.
#         ("Image",),
#         # Keyword args for field class.
#         {"blank": True, "upload_to": "blog"},
#     ),
#     # Example of adding a field to *all* of Mezzanine's content types:
#     (
#         "mezzanine.pages.models.Page.another_field",
#         "IntegerField", # 'django.db.models.' is implied if path is omitted.
#         ("Another name",),
#         {"blank": True, "default": 1},
#     ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
# NOTE(review): South is only needed on Django < 1.7 — confirm before upgrading.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
#                ('Full Name', 'anotheremail@example.com'))
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    "django.template.loaders.filesystem.Loader",
    "django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
# 0o644: owner read/write, everyone else read-only.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
    "default": {
        # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.",
        # DB name or path to database file if using sqlite3.
        "NAME": "",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.redirects",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.sitemaps",
    "django.contrib.staticfiles",
    "mezzanine.boot",
    "mezzanine.conf",
    "mezzanine.core",
    "mezzanine.generic",
    "mezzanine.blog",
    "mezzanine.forms",
    "mezzanine.pages",
    "mezzanine.galleries",
    "mezzanine.twitter",
    #"mezzanine.accounts",
    #"mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.static",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "django.core.context_processors.tz",
    "mezzanine.conf.context_processors.settings",
    "mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
    "mezzanine.core.middleware.UpdateCacheMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "mezzanine.core.request.CurrentRequestMiddleware",
    "mezzanine.core.middleware.RedirectFallbackMiddleware",
    "mezzanine.core.middleware.TemplateForDeviceMiddleware",
    "mezzanine.core.middleware.TemplateForHostMiddleware",
    "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
    "mezzanine.core.middleware.SitePermissionMiddleware",
    # Uncomment the following if using any of the SSL settings:
    # "mezzanine.core.middleware.SSLRedirectMiddleware",
    "mezzanine.pages.middleware.PageMiddleware",
    "mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
    "debug_toolbar",
    "django_extensions",
    "compressor",
    PACKAGE_NAME_FILEBROWSER,
    PACKAGE_NAME_GRAPPELLI,
)
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
#     "SSH_USER": "", # SSH username for host deploying to
#     "HOSTS": ALLOWED_HOSTS[:1], # List of hosts to deploy to (eg, first host)
#     "DOMAINS": ALLOWED_HOSTS, # Domains for public site
#     "REPO_URL": "ssh://hg@bitbucket.org/user/project", # Project's repo URL
#     "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
#     "PROJECT_NAME": "", # Unique identifier for project
#     "REQUIREMENTS_PATH": "requirements.txt", # Project's pip requirements
#     "GUNICORN_PORT": 8000, # Port gunicorn will listen on
#     "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
#     "DB_PASS": "", # Live database password
#     "ADMIN_PASS": "", # Live admin user password
#     "SECRET_KEY": SECRET_KEY,
#     "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
    from local_settings import *
except ImportError:
    pass
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
    from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
    pass
else:
    set_dynamic_settings(globals())
| {
"content_hash": "4d1a059eeac1e3303c9b3273b8d35aa1",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 79,
"avg_line_length": 34.89010989010989,
"alnum_prop": 0.6886614173228346,
"repo_name": "eroesch/mymezz",
"id": "575b86bce1a129fad72f9cb00cbc59c38130b300",
"size": "12700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20914"
}
],
"symlink_target": ""
} |
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_iprofile
from euca2ools.commands.iam.deleterole import DeleteRole
from euca2ools.commands.iam.getinstanceprofile import GetInstanceProfile
from euca2ools.commands.iam.removerolefrominstanceprofile import \
RemoveRoleFromInstanceProfile
class DeleteInstanceProfile(IAMRequest):
    """Request that deletes an IAM instance profile, optionally removing
    and deleting its attached roles first (-r), or merely listing what a
    recursive delete would remove (-p)."""
    DESCRIPTION = ('Delete an instance profile\n\nThis will break any running '
                   'instances that depend upon access to the deleted instance '
                   'profile.')
    ARGS = [arg_iprofile(
                help='name of the instance profile to delete (required)'),
            Arg('-r', '--recursive', action='store_true', route_to=None,
                help='''remove all IAM resources associated with the instance
                profile first'''),
            Arg('-p', '--pretend', action='store_true', route_to=None,
                help='''list the resources that would be deleted instead of
                actually deleting them. Implies -r.'''),
            AS_ACCOUNT]
    def main(self):
        """Gather the profile's roles when -r/-p is given, then either
        report them (-p) or detach/delete them and send the delete call."""
        if self.args.get('recursive') or self.args.get('pretend'):
            # Figure out what we have to delete
            req = GetInstanceProfile.from_other(
                self, InstanceProfileName=self.args['InstanceProfileName'],
                DelegateAccount=self.args.get('DelegateAccount'))
            response = req.main()
            roles = []
            for role in response.get('InstanceProfile', {}).get('Roles') or []:
                roles.append({'arn': role.get('Arn'),
                              'name': role.get('RoleName')})
        else:
            # Just in case
            roles = []
        if self.args.get('pretend'):
            # -p: report what would be deleted; nothing is sent to the server.
            return {'roles': roles}
        else:
            if self.args.get('recursive'):
                for role in roles:
                    req = RemoveRoleFromInstanceProfile.from_other(
                        self, RoleName=role['name'],
                        InstanceProfileName=self.args['InstanceProfileName'],
                        DelegateAccount=self.args.get('DelegateAccount'))
                    req.main()
                    # This role could be attached to another instance
                    # profile, which means that a truly-recursive delete
                    # would need to also remove it from that instance
                    # profile, delete all of the role's policies, and
                    # so on.  The failure modes for this are rather nasty,
                    # so we don't tell DeleteRole to delete recursively;
                    # if the same role belongs to more than one instance
                    # profile then DeleteRole will simply fail harmlessly.
                    req = DeleteRole.from_other(
                        self, RoleName=role['name'],
                        DelegateAccount=self.args.get('DelegateAccount'))
                    req.main()
            return self.send()
    def print_result(self, result):
        """With -p, print the ARNs of the roles a recursive delete would
        remove; otherwise print nothing."""
        if self.args.get('pretend'):
            print 'roles'
            for role in result['roles']:
                print '\t' + role['arn']
| {
"content_hash": "42a542dd3b282d0cc05830d60982d791",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 48.36363636363637,
"alnum_prop": 0.5632832080200502,
"repo_name": "jhajek/euca2ools",
"id": "ab266da11e2b176fc1113eb4e8f9357f844f887e",
"size": "4539",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "euca2ools/commands/iam/deleteinstanceprofile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1230266"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
} |
import time
from math import isinf
from indico.util.console import cformat
class Benchmark:
    """Measure the wall-clock duration of a piece of code.

    Usable manually (start()/stop()) or as a context manager:

        with Benchmark() as b:
            do_stuff()
        b.print_result()
    """

    def __init__(self, start=False):
        self._started_at = None
        self._ended_at = None
        if start:
            self.start()

    def start(self):
        self._started_at = time.time()
        return self

    def stop(self):
        self._ended_at = time.time()

    def __enter__(self):
        return self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def __float__(self):
        # -inf means never started, +inf means still running.
        if self._started_at is None:
            return float('-inf')
        if self._ended_at is None:
            return float('inf')
        return self._ended_at - self._started_at

    def __str__(self):
        duration = float(self)
        return str(duration) if isinf(duration) else f'{duration:.05f}'

    __repr__ = __str__

    def print_result(self, slow=float('inf'), veryslow=float('inf')):
        duration = float(self)
        if duration == float('-inf'):
            print(cformat('%{blue!}skipped'))
        elif duration == float('inf'):
            print(cformat('%{red}running'))
        else:
            # Colorize by how the duration compares with the thresholds.
            if duration >= veryslow:
                template = '%{red!}{}'
            elif duration >= slow:
                template = '%{yellow!}{}'
            else:
                template = '%{green!}{}'
            print(cformat(template).format(self))
| {
"content_hash": "59f8c24a915cabcc36421aadb17d53cc",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 69,
"avg_line_length": 25.677419354838708,
"alnum_prop": 0.5383165829145728,
"repo_name": "pferreir/indico",
"id": "24920a5e92beb405199e4248ee20b52e1792a97f",
"size": "1806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/util/benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import unittest
from cssqc.parser import CSSQC
from cssqc.qualityWarning import QualityWarning
class Test_noUniversalSelectors(unittest.TestCase):
    # Every "*" (universal) selector in the stylesheet below must be
    # reported, one QualityWarning per offending source line.

    def parse(self, data):
        checker = CSSQC({"noUniversalSelectors": True})
        checker.parse(data)
        return checker

    def test_no_universal_selectors(self):
        c = self.parse('''* {
margin: 0;
}
div * {
}
*.class {
color: blue;
}
*{
padding: none;
}
span *{
width: 100px;
},
table *, div {
display: none;
}
span,* {
padding: 5px;
}''')
        offending_lines = (1, 4, 9, 12, 15, 18)
        self.assertEqual(c.warnings, [
            QualityWarning('noUniversalSelectors', line)
            for line in offending_lines
        ])
| {
"content_hash": "3d5f261eee8d472c9f81f72ca5eec2d9",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 55,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.6122222222222222,
"repo_name": "matematik7/CSSQC",
"id": "7c1e73d91a45fe52dd2e62807b7707e0dc63ab97",
"size": "1305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_noUniversalSelectors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111603"
},
{
"name": "Python",
"bytes": "199164"
}
],
"symlink_target": ""
} |
from typing import List
import typepy
from ._text_writer import TextTableWriter
class CsvTableWriter(TextTableWriter):
    """
    A table writer class for character separated values format.

    The default separated character is a comma (``","``).

    :Example:
        :ref:`example-csv-table-writer`
    """

    FORMAT_NAME = "csv"

    @property
    def format_name(self) -> str:
        return self.FORMAT_NAME

    @property
    def support_split_write(self) -> bool:
        return True

    @property
    def margin(self) -> int:
        return self._margin

    @margin.setter
    def margin(self, unused_value: int) -> None:
        # margin setting must be ignored
        return

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        # CSV output has no decorations: no frame chars, no indentation,
        # no padding, and no header separator row.
        self._set_chars("")
        self.indent_string = ""
        self.column_delimiter = kwargs.get("column_delimiter", ",")
        self._margin = 0
        self.is_padding = False
        self.is_formatting_float = False
        self.is_write_header_separator_row = False
        # Empty strings are written unquoted.
        self._quoting_flags[typepy.Typecode.NULL_STRING] = False

    def _write_header(self) -> None:
        # A header row is only written when header values exist.
        if not typepy.is_empty_sequence(self.headers):
            super()._write_header()

    def _get_opening_row_items(self) -> List[str]:
        # CSV has no opening frame row.
        return []

    def _get_value_row_separator_items(self) -> List[str]:
        # CSV has no separator rows between values.
        return []

    def _get_closing_row_items(self) -> List[str]:
        # CSV has no closing frame row.
        return []
| {
"content_hash": "f7cdb70f8d5a0f7b0b2ac8818a4c2010",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 67,
"avg_line_length": 23.49206349206349,
"alnum_prop": 0.5905405405405405,
"repo_name": "thombashi/pytablewriter",
"id": "b88a9191d96b0c9cb150d753c64338e62164d118",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytablewriter/writer/text/_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "649545"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import contextlib
import unittest
from functools import partial
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler
def get_places():
    """Return the device places to test on (CUDA only, when available)."""
    if core.is_compiled_with_cuda():
        return [core.CUDAPlace(0)]
    return []
@contextlib.contextmanager
def prog_scope_guard(main_prog, startup_prog):
    """Run the enclosed block in a fresh scope/name/program context."""
    scope = fluid.core.Scope()
    with fluid.unique_name.guard(), \
            fluid.scope_guard(scope), \
            fluid.program_guard(main_prog, startup_prog):
        yield
def bow_net(data,
            label,
            dict_dim,
            is_sparse=False,
            emb_dim=128,
            hid_dim=128,
            hid_dim2=96,
            class_dim=2):
    """
    BOW net
    This model is from https://github.com/PaddlePaddle/models:
    fluid/PaddleNLP/text_classification/nets.py
    """
    # Pipeline: ids -> embedding -> sum-pool -> tanh -> fc x2 -> softmax.
    embedded = fluid.layers.embedding(
        input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim])
    pooled = fluid.layers.sequence_pool(input=embedded, pool_type='sum')
    activated = fluid.layers.tanh(pooled)
    hidden1 = fluid.layers.fc(input=activated, size=hid_dim, act="tanh")
    hidden2 = fluid.layers.fc(input=hidden1, size=hid_dim2, act="tanh")
    prediction = fluid.layers.fc(input=[hidden2], size=class_dim, act="softmax")
    # Cross-entropy against the labels, averaged over the batch.
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    return fluid.layers.mean(x=cost)
class TestWeightDecay(unittest.TestCase):
    # Verifies that manually-applied weight decay yields the same losses
    # under the plain executor and the data-parallel compiled executor.

    def setUp(self):
        # A tiny fixed training set (5 batches of 4) from the IMDB data,
        # so every run trains on identical inputs.
        self.word_dict = paddle.dataset.imdb.word_dict()
        reader = paddle.batch(
            paddle.dataset.imdb.train(self.word_dict), batch_size=4)()
        self.train_data = [next(reader) for _ in range(5)]
        self.learning_rate = .5

    def run_executor(self, place, feed_list, loss):
        # Train with the single-program executor; return per-batch losses.
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())
        main_prog = fluid.default_main_program()
        loss_set = []
        for data in self.train_data:
            out = exe.run(main_prog,
                          feed=feeder.feed(data),
                          fetch_list=[loss.name])
            print("loss %s" % (np.average(out)))
            loss_set.append(np.average(out))
        return loss_set

    def run_parallel_exe(self,
                         place,
                         feed_list,
                         loss,
                         use_cuda=True,
                         use_reduce=False,
                         use_fast_executor=False,
                         use_ir_memory_optimize=False):
        # Train with a CompiledProgram in data-parallel mode; return
        # per-batch losses for comparison with run_executor().
        exe = fluid.Executor(place)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        exe.run(fluid.default_startup_program())
        exec_strategy = fluid.ExecutionStrategy()
        if use_fast_executor:
            exec_strategy.use_experimental_executor = True
        build_strategy = fluid.BuildStrategy()
        # Gradient aggregation strategy: Reduce vs AllReduce.
        build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \
            if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce
        build_strategy.memory_optimize = use_ir_memory_optimize
        train_cp = compiler.CompiledProgram(fluid.default_main_program(
        )).with_data_parallel(
            loss_name=loss.name,
            exec_strategy=exec_strategy,
            build_strategy=build_strategy)
        loss_set = []
        for data in self.train_data:
            out = exe.run(train_cp,
                          feed=feeder.feed(data),
                          fetch_list=[loss.name])
            loss_set.append(np.average(out))
        return loss_set

    def check_weight_decay(self,
                           place,
                           model,
                           use_parallel_exe=False,
                           use_reduce=False):
        # Build the model, apply weight decay by explicitly subtracting
        # lr * param from every parameter after the optimizer step, then
        # train with the chosen executor and return the loss history.
        main_prog = fluid.framework.Program()
        startup_prog = fluid.framework.Program()
        # Fixed seed so both executors start from identical parameters.
        startup_prog.random_seed = 1
        with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1)
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
            avg_cost = model(data, label, len(self.word_dict))
            # Pair every parameter with its decay term (lr * param).
            param_list = [(var, var * self.learning_rate)
                          for var in main_prog.block(0).all_parameters()]
            optimizer = fluid.optimizer.Adagrad(
                learning_rate=self.learning_rate)
            optimizer.minimize(avg_cost)
            # Manual weight decay: param <- param - lr * param.
            for params in param_list:
                updated_p = fluid.layers.elementwise_sub(
                    x=params[0], y=params[1])
                fluid.layers.assign(input=updated_p, output=params[0])
            if use_parallel_exe:
                loss = self.run_parallel_exe(
                    place, [data, label],
                    loss=avg_cost,
                    use_cuda=True,
                    use_reduce=use_reduce)
            else:
                loss = self.run_executor(place, [data, label], loss=avg_cost)
        return loss

    def test_weight_decay(self):
        # Losses from the plain executor and both parallel reduce
        # strategies must agree to within rtol=5e-5.
        model = partial(bow_net, is_sparse=False)
        for place in get_places():
            loss = self.check_weight_decay(place, model, use_parallel_exe=False)
            loss2 = self.check_weight_decay(
                place, model, use_parallel_exe=True, use_reduce=False)
            for i in range(len(loss)):
                assert np.isclose(a=loss[i], b=loss2[i], rtol=5e-5)
            loss3 = self.check_weight_decay(
                place, model, use_parallel_exe=True, use_reduce=True)
            for i in range(len(loss)):
                assert np.isclose(a=loss[i], b=loss3[i], rtol=5e-5)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "fc2fc69def3a768debde3f10e8fc2b2d",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 84,
"avg_line_length": 34.2,
"alnum_prop": 0.5619047619047619,
"repo_name": "baidu/Paddle",
"id": "e5e7e76737177f7f4aaae7d7e28e9e5166b96de5",
"size": "6598",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_weight_decay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "217842"
},
{
"name": "C++",
"bytes": "2771237"
},
{
"name": "CMake",
"bytes": "113670"
},
{
"name": "Cuda",
"bytes": "424141"
},
{
"name": "M4",
"bytes": "40913"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "892636"
},
{
"name": "Shell",
"bytes": "64351"
}
],
"symlink_target": ""
} |
""" Sahana Eden Automated Test - HRM001 Create Job Role
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateVolunteerJobRole(SeleniumUnitTest):
def test_hrm001_create_volunteer_job_role(self):
"""
@case: HRM001
@description: Create a Job Role
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
self.login(account="admin", nexturl="vol/job_role/create")
self.create("hrm_job_role",
[( "name",
"Facility Manager"
),
( "comments",
"Comment/Description of the role job goes here."),
]
)
| {
"content_hash": "49df865ce51d20b03d9aab96e0d2fe35",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 110,
"avg_line_length": 40.745098039215684,
"alnum_prop": 0.6568816169393648,
"repo_name": "vgupta6/Project-2",
"id": "ed3697540793fa1b9fcc3e6d54de67eb8393904b",
"size": "2078",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tests/volunteer/create_volunteer_job_role.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15540599"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "2202"
},
{
"name": "Python",
"bytes": "23301481"
},
{
"name": "Racket",
"bytes": "166"
}
],
"symlink_target": ""
} |
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
import traceback
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
# (None means: decide at startup via check_enableusersite()).
ENABLE_USER_SITE = None

# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
    """Join *paths* and return (absolute path, case-normalized path)."""
    joined = os.path.join(*paths)
    try:
        joined = os.path.abspath(joined)
    except OSError:
        # e.g. the current directory vanished; keep the joined path as-is.
        pass
    return joined, os.path.normcase(joined)
def abs__file__():
    """Set all module' __file__ attribute to an absolute path"""
    for module in sys.modules.values():
        # PEP 302 loaders manage __file__ themselves; leave them alone.
        if hasattr(module, '__loader__'):
            continue
        try:
            module.__file__ = os.path.abspath(module.__file__)
        except (AttributeError, OSError):
            # Builtin modules have no __file__; abspath may fail on a
            # deleted working directory.
            pass
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    unique = []
    known_paths = set()
    for entry in sys.path:
        # Absolutize; drop entries already seen (case-insensitively on
        # case-insensitive file systems).
        entry, entry_case = makepath(entry)
        if entry_case not in known_paths:
            unique.append(entry)
            known_paths.add(entry_case)
    sys.path[:] = unique
    return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from sysconfig import get_platform
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    # Debug builds use a distinct build-directory suffix.
    if hasattr(sys, 'gettotalrefcount'):
        s += '-pydebug'
    # NOTE(review): this pops the last sys.path entry (the caller only
    # invokes us when it ends in "Modules") and appends the sibling build
    # directory in its place.
    s = os.path.join(os.path.dirname(sys.path.pop()), s)
    sys.path.append(s)
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    known = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                known.add(makepath(entry)[1])
        except TypeError:
            # Non-string sys.path entries (e.g. importer objects): skip.
            continue
    return known
def addpackage(sitedir, name, known_paths):
    """Process a .pth file within the site-packages directory:
    For each line in the file, either combine it with sitedir to a path
    and add that to known_paths, or execute it if it starts with 'import '.
    """
    if known_paths is None:
        # NOTE(review): the result of _init_pathinfo() is discarded here,
        # leaving known_paths as None; this mirrors upstream site.py.
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        # Unreadable .pth files are silently ignored.
        return
    with f:
        for n, line in enumerate(f):
            # Comment lines are skipped entirely.
            if line.startswith("#"):
                continue
            try:
                # Executable lines ('import x') are run verbatim.
                if line.startswith(("import ", "import\t")):
                    exec line
                    continue
                line = line.rstrip()
                dir, dircase = makepath(sitedir, line)
                # Only existing, not-yet-seen directories are appended.
                if not dircase in known_paths and os.path.exists(dir):
                    sys.path.append(dir)
                    known_paths.add(dircase)
            except Exception as err:
                # A broken line must never abort interpreter startup:
                # report it with a traceback and skip the rest of the file.
                print >>sys.stderr, "Error processing line {:d} of {}:\n".format(
                    n+1, fullname)
                for record in traceback.format_exception(*sys.exc_info()):
                    for line in record.splitlines():
                        print >>sys.stderr, ' '+line
                print >>sys.stderr, "\nRemainder of file ignored"
                break
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        # Add path component
        sys.path.append(sitedir)
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    # Process every *.pth file in the directory, in sorted order.
    dotpth = os.extsep + "pth"
    for name in sorted(name for name in names if name.endswith(dotpth)):
        addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    if sys.flags.no_user_site:
        return False

    # Refuse (None) when running setuid/setgid: the real and effective
    # user/group ids must match.
    if (hasattr(os, "getuid") and hasattr(os, "geteuid")
            and os.geteuid() != os.getuid()):
        return None
    if (hasattr(os, "getgid") and hasattr(os, "getegid")
            and os.getegid() != os.getgid()):
        return None

    return True
def getuserbase():
    """Returns the `user base` directory path.

    The `user base` directory can be used to store data. If the global
    variable ``USER_BASE`` is not initialized yet, this function will also set
    it.
    """
    global USER_BASE
    # Compute once, then serve the cached value on later calls.
    if USER_BASE is None:
        from sysconfig import get_config_var
        USER_BASE = get_config_var('userbase')
    return USER_BASE
def getusersitepackages():
    """Returns the user-specific site-packages directory path.

    If the global variable ``USER_SITE`` is not initialized yet, this
    function will also set it.
    """
    global USER_SITE
    user_base = getuserbase()  # this will also set USER_BASE

    if USER_SITE is None:
        from sysconfig import get_path
        import os

        # OSX framework builds keep user site-packages elsewhere.
        if sys.platform == 'darwin':
            from sysconfig import get_config_var
            if get_config_var('PYTHONFRAMEWORK'):
                USER_SITE = get_path('purelib', 'osx_framework_user')
                return USER_SITE

        USER_SITE = get_path('purelib', '%s_user' % os.name)

    return USER_SITE
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.
    """
    # Resolving the per-user dir also initializes USER_BASE and USER_SITE.
    user_site = getusersitepackages()

    if not (ENABLE_USER_SITE and os.path.isdir(user_site)):
        return known_paths
    addsitedir(user_site, known_paths)
    return known_paths
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python).

    For each directory present in the global ``PREFIXES``, this function
    will find its `site-packages` subdirectory depending on the system
    environment, and will return a list of full paths.
    """
    sitepackages = []
    seen = set()

    for prefix in PREFIXES:
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)

        # Platform-specific layout of the site-packages subdirectories.
        if sys.platform in ('os2emx', 'riscos'):
            candidates = [os.path.join(prefix, "Lib", "site-packages")]
        elif os.sep == '/':
            candidates = [
                os.path.join(prefix, "lib",
                             "python" + sys.version[:3], "site-packages"),
                os.path.join(prefix, "lib", "site-python"),
            ]
        else:
            candidates = [
                prefix,
                os.path.join(prefix, "lib", "site-packages"),
            ]
        if sys.platform == "darwin":
            # for framework builds *only* we add the standard Apple
            # locations.
            from sysconfig import get_config_var
            framework = get_config_var("PYTHONFRAMEWORK")
            if framework and "/%s.framework/" % (framework,) in prefix:
                candidates.append(
                    os.path.join("/Library", framework,
                                 sys.version[:3], "site-packages"))
        sitepackages.extend(candidates)
    return sitepackages
def addsitepackages(known_paths):
    """Add site-packages (and possibly site-python) to sys.path"""
    existing_dirs = (d for d in getsitepackages() if os.path.isdir(d))
    for sitedir in existing_dirs:
        addsitedir(sitedir, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.
    The library search path needs to be amended so these will be found
    during module import. Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    libpath = os.environ['BEGINLIBPATH'].split(';')
    # An empty final component means the path ended with ';' -- fill it;
    # otherwise append a new component.
    if not libpath[-1]:
        libpath[-1] = dllpath
    else:
        libpath.append(dllpath)
    os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
    """Define new builtins 'quit' and 'exit'.

    These are objects which make the interpreter exit when called.
    The repr of each object contains a hint at how it works.
    """
    # Pick the platform-appropriate EOF keystroke for the hint text.
    eof_hints = {':': 'Cmd-Q', '\\': 'Ctrl-Z plus Return'}
    eof = eof_hints.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    __builtin__.quit = Quitter('quit')
    __builtin__.exit = Quitter('exit')
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    # Number of lines shown per page before prompting the user.
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        # name:  builtin name this object is installed as
        # data:  fallback text used when no candidate file is readable
        # files/dirs: candidate file names and directories to search first
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None

    def __setup(self):
        # Lazily load the text: the first readable dir/file combination
        # wins; otherwise fall back to the built-in string.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = file(filename, "rU")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        # Short texts are shown inline; long ones get a usage hint.
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        # Page through the text MAXLINES at a time; Return continues,
        # 'q' (plus Return) quits.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print self.__lines[i]
            except IndexError:
                # Ran past the last line: done.
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = raw_input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Set 'copyright' and 'credits' in __builtin__"""
    __builtin__.copyright = _Printer("copyright", sys.copyright)
    # The credits text depends on which interpreter implementation runs us.
    if sys.platform[:4] == 'java':
        __builtin__.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    elif sys.platform == 'cli':
        __builtin__.credits = _Printer(
            "credits",
            "IronPython is maintained by the IronPython developers (www.ironpython.net).")
    else:
        __builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
    # 'license' searches for a LICENSE file next to the stdlib first.
    here = os.path.dirname(os.__file__)
    __builtin__.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    # Install the interactive 'help' builtin (a lazy pydoc wrapper).
    __builtin__.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case."""
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # getdefaultlocale() may return (None, None); guard before calling
        # .startswith on the encoding.
        if enc is not None and enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                # Unknown codepage: alias it to the always-available 'mbcs'.
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation. The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    # The two `if 0:` blocks below are deliberately disabled experiments;
    # flip one to 1 to enable the corresponding behavior.
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        import sitecustomize
    except ImportError:
        # No sitecustomize module installed: perfectly normal.
        pass
    except Exception:
        # Any other failure is reported but must not abort startup.
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            print >>sys.stderr, \
                "'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        import usercustomize
    except ImportError:
        # No usercustomize module installed: perfectly normal.
        pass
    except Exception:
        # Any other failure is reported but must not abort startup.
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            print>>sys.stderr, \
                "'import usercustomize' failed; use -v for traceback"
def main():
    # Interpreter-startup entry point: normalizes sys.path, adds user and
    # global site-packages, and installs the interactive builtins.
    # Invoked at import time (see the call below).
    global ENABLE_USER_SITE

    abs__file__()
    known_paths = removeduppaths()
    # Running from a source build tree: make the build dir importable.
    if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
        addbuilddir()
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization. The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding

# site.py performs its setup as a side effect of being imported.
main()
def _script():
    # Command-line interface: ``python site.py [--user-base] [--user-site]``.
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # No arguments: dump the effective site configuration.
        print "sys.path = ["
        for dir in sys.path:
            print "    %r," % (dir,)
        print "]"
        print "USER_BASE: %r (%s)" % (USER_BASE,
            "exists" if os.path.isdir(USER_BASE) else "doesn't exist")
        print "USER_SITE: %r (%s)" % (USER_SITE,
            "exists" if os.path.isdir(USER_SITE) else "doesn't exist")
        print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print os.pathsep.join(buffer)
        # Exit code communicates whether the user site dir is enabled.
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        # Unknown arguments: print usage and exit with an error code.
        import textwrap
        print textwrap.dedent(help % (sys.argv[0], os.pathsep))
        sys.exit(10)
# Run the CLI only when invoked directly (import-time setup already ran).
if __name__ == '__main__':
    _script()
| {
"content_hash": "ec9baa0266c95b92892b1f6fab11a5b5",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 90,
"avg_line_length": 33.07592891760905,
"alnum_prop": 0.6002246751978119,
"repo_name": "jstammers/EDMSuite",
"id": "0b21c166689afda91f98739ede48d159d29828a0",
"size": "20474",
"binary": false,
"copies": "41",
"ref": "refs/heads/atom-mega-mix",
"path": "NavPython/IronPython/Lib/site.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3461"
},
{
"name": "C#",
"bytes": "6585392"
},
{
"name": "CSS",
"bytes": "5394"
},
{
"name": "F#",
"bytes": "1632"
},
{
"name": "Forth",
"bytes": "790"
},
{
"name": "HTML",
"bytes": "163836"
},
{
"name": "JavaScript",
"bytes": "1060"
},
{
"name": "PowerShell",
"bytes": "70539"
},
{
"name": "Python",
"bytes": "7661598"
},
{
"name": "Ruby",
"bytes": "1067"
},
{
"name": "Visual Basic",
"bytes": "2135"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from tempfile import NamedTemporaryFile
from unittest import TestCase, mock
from airflow.providers.google.cloud.transfers.gdrive_to_local import GoogleDriveToLocalOperator
# Shared fixture constants for the operator under test.
TASK_ID = "test-drive-to-local-operator"
FOLDER_ID = "1234567890qwerty"
FILE_NAME = "file.pdf"
GCP_CONN_ID = "google_cloud_default"
class TestGoogleDriveToLocalOperator(TestCase):
    # Verifies that execute() resolves the Drive file id via the hook and
    # streams the file into the configured local output path.

    @mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_local.GoogleDriveHook")
    def test_execute(self, hook_mock):
        file_meta = {"id": "123xyz"}
        with NamedTemporaryFile("wb") as temp_file:
            operator = GoogleDriveToLocalOperator(
                task_id=TASK_ID,
                folder_id=FOLDER_ID,
                file_name=FILE_NAME,
                gcp_conn_id=GCP_CONN_ID,
                output_file=temp_file.name,
            )
            hook_mock.return_value.get_file_id.return_value = file_meta

            operator.execute(context=None)

            hook_mock.assert_called_once_with(
                delegate_to=None, gcp_conn_id=GCP_CONN_ID, impersonation_chain=None
            )
            hook_mock.return_value.download_file.assert_called_once_with(
                file_id=file_meta["id"], file_handle=mock.ANY
            )
| {
"content_hash": "8e3cdd6e7015caf17518b7ff122a29f8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 95,
"avg_line_length": 35.94285714285714,
"alnum_prop": 0.6351351351351351,
"repo_name": "apache/airflow",
"id": "def50026f8fbede3b0f3a43e6ba65e5ec2cfeb50",
"size": "2045",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/transfers/test_gdrive_to_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """Return True if nums contains two equal values whose indices differ by at most k."""
        last_seen = {}
        for i, n in enumerate(nums):
            # A prior occurrence within distance k means a nearby duplicate.
            if n in last_seen and i - last_seen[n] <= k:
                return True
            last_seen[n] = i
        return False
| {
"content_hash": "74e3f5ee0eeb5e2d710742f92ee8a6d6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.44594594594594594,
"repo_name": "menghanY/LeetCode-Python",
"id": "cd3715b084d181209badcc795dfc0820f14fd7ed",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Array/ContainsDuplicateII.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69719"
}
],
"symlink_target": ""
} |
import requests
import getpass

################################################################################
####################################################################### Config #
################################################################################
# Target Insacloud API (HTTP basic auth) that stores the imported events.
urlInsaCloud = 'http://localhost/api/events/'
apiInsacloud_user = input('Insacloud API user: ')
apiInsacloud_pwd = getpass.getpass('Insacloud API password: ')
# Public Eventful search API used as the event source.
urlEventful = "http://api.eventful.com/json/events/search"
config_ImportEventDataService = {
    'locations': ['Lyon', 'Villeurbanne', 'Oullins', 'Decines', 'Vaulx-en-Velin',
                  'Bron', 'Grenoble', 'Vienne'],
    'categories': ['music', 'circus', 'movie', 'festival', 'party'],
    'image_sizes': 'original'
}
################################################################################
####################################################################### Script #
################################################################################
# For every (location, category) pair, pull events from Eventful and push each
# one (with its full-size poster) into the Insacloud API.
for location in config_ImportEventDataService['locations']:
    for category in config_ImportEventDataService['categories']:
        print ("\n---------- Getting %s events from %s -------------\n" % (category, location))
        # Let requests build and URL-encode the query string instead of
        # concatenating it by hand (locations may contain spaces/accents).
        response = requests.get(urlEventful, params={
            'app_key': 'test_key',
            'location': location,
            'category': category,
        })
        if response.status_code != 200:
            continue
        events = response.json()
        if events is None or events.get('events') is None:
            continue
        for event in events['events']['event']:
            # Skip events missing a title or a poster image; use .get() so a
            # missing key does not raise KeyError.
            image = event.get('image')
            if image is None or event.get('title') is None or image.get('url') is None:
                continue
            print(event['title'])
            data = {"id_source": event['id'],
                    "source": "eventful",
                    "date_start": event['start_time'],
                    "date_end": event['stop_time'],
                    'category': category,
                    "title": event['title'],
                    "location": event['city_name'],
                    "venue": event['venue_name'],
                    "latitude": event['latitude'],
                    "longitude": event['longitude']
                    }
            # Eventful returns a thumbnail URL; swap in the original-size asset.
            posterUrl = image['url'].replace('small', 'original')
            poster = requests.get(posterUrl).content
            files = {'poster': (posterUrl,
                                poster,
                                'image/jpg',
                                {'Expires': '0'})
                     }
            requests.post(urlInsaCloud, data=data, files=files,
                          auth=(apiInsacloud_user, apiInsacloud_pwd), verify=True)
"content_hash": "fcf936c4cec277d35c2320818f9f67b3",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 133,
"avg_line_length": 48.472727272727276,
"alnum_prop": 0.44373593398349587,
"repo_name": "insacloud/insacloud-back",
"id": "983e3713f0a5c5d1ee5184b9a318d71beb9c2c0e",
"size": "2689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insacloud/services/events/ImportEvents_eventful.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46325"
},
{
"name": "Shell",
"bytes": "3608"
}
],
"symlink_target": ""
} |
import mock
from nose.plugins.skip import SkipTest
from nose.tools import eq_, raises
import sqlalchemy.exc as saexc
import savalidation.tests.examples as ex
from savalidation import ValidationError
class TestFamily(object):
    """Validation behavior of the ex.Family model: required fields, unique
    constraints, enumerated status values, and max-length enforcement."""

    def tearDown(self):
        # need this to clear the session after the exception catching below
        ex.sess.rollback()
        ex.sess.query(ex.Family).delete()
        ex.sess.commit()

    def test_id_is_auto_increment(self):
        f1 = ex.Family(name='f1', reg_num=1)
        ex.sess.add(f1)
        ex.sess.commit()
        f2 = ex.Family(name='f2', reg_num=2)
        ex.sess.add(f2)
        ex.sess.commit()
        eq_(f1.id, f2.id - 1)

    def test_edit(self):
        f1 = ex.Family(name='test_edit', reg_num=1)
        ex.sess.add(f1)
        ex.sess.commit()
        fid = f1.id
        # detach all instances so the next query reloads from the database
        ex.sess.remove()
        f1 = ex.sess.query(ex.Family).get(fid)
        assert f1.name == 'test_edit'
        f1.status = 'foobar'
        try:
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            ex.sess.rollback()
            expect = {'status': ["Value must be one of: active; inactive; moved (not 'foobar')"]}
            eq_(f1.validation_errors, expect)
            eq_(str(e), 'validation error(s): <Family id=1, name=test_edit> [status: "Value must be one of: active; inactive; moved (not \'foobar\')"]')
        # a valid status should then commit cleanly
        f1.status = 'inactive'
        ex.sess.commit()

    @raises(saexc.IntegrityError)
    def test_name_is_unique(self):
        f1 = ex.Family(name='f', reg_num=1)
        f2 = ex.Family(name='f', reg_num=2)
        ex.sess.add(f1)
        ex.sess.add(f2)
        ex.sess.commit()

    @raises(saexc.IntegrityError)
    def test_reg_num_is_unique(self):
        f1 = ex.Family(name='f1', reg_num=1)
        ex.sess.add(f1)
        f2 = ex.Family(name='f2', reg_num=1)
        ex.sess.add(f2)
        ex.sess.commit()

    def test_status_default(self):
        f1 = ex.Family(name='f1', reg_num=1)
        ex.sess.add(f1)
        ex.sess.commit()
        eq_(f1.status, 'active')
        ex.sess.commit()

    def test_invalid_status(self):
        try:
            f1 = ex.Family(name='f1', reg_num=1, status='foobar')
            ex.sess.add(f1)
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            expect = {'status': ["Value must be one of: active; inactive; moved (not 'foobar')"]}
            eq_(f1.validation_errors, expect)
            eq_(str(e), 'validation error(s): <Family id=None, name=f1> [status: "Value must be one of: active; inactive; moved (not \'foobar\')"]')

    def test_missing_regnum(self):
        try:
            f1 = ex.Family(name='f1', status='active')
            ex.sess.add(f1)
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            expect = {'reg_num': ["Please enter a value"]}
            eq_(f1.validation_errors, expect)

    def test_missing_name(self):
        try:
            f1 = ex.Family(reg_num=1, status='active')
            ex.sess.add(f1)
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            expect = {'name': ["Please enter a value"]}
            eq_(f1.validation_errors, expect)

    def test_multiple_invalid_instances(self):
        try:
            f1 = ex.Family(name='f1', status='active')
            f2 = ex.Family(name='f2', status='active')
            ex.sess.add(f1)
            ex.sess.add(f2)
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            # a single flush reports every invalid instance at once
            eq_(len(e.invalid_instances), 2)
            expect = {'reg_num': ["Please enter a value"]}
            eq_(f1.validation_errors, expect)
            eq_(f2.validation_errors, expect)

    def test_missing_both(self):
        try:
            f1 = ex.Family()
            ex.sess.add(f1)
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            expect = {'reg_num': ['Please enter a value'], 'name': ['Please enter a value']}
            eq_(len(e.invalid_instances), 1)
            eq_(f1.validation_errors, expect)

    def test_name_too_long(self):
        try:
            f1 = ex.Family(name='f1'*100, reg_num=1)
            ex.sess.add(f1)
            ex.sess.commit()
            assert False, 'exception expected'
        except ValidationError as e:
            expect = {'name': ['Enter a value less than 75 characters long']}
            eq_(f1.validation_errors, expect)
class TestPerson(object):
    """Validation behavior of ex.Person, including a column that is nullable
    at the database level but still required by the validators."""

    def tearDown(self):
        ex.sess.rollback()

    def test_id_is_auto_increment(self):
        f1 = ex.Person(name_first='f1', name_last='l1', family_role='father', nullable_but_required='f')
        ex.sess.add(f1)
        ex.sess.commit()
        f2 = ex.Person(name_first='f1', name_last='l1', family_role='father', nullable_but_required='f')
        f2.name_first = 'foobar'
        ex.sess.add(f2)
        ex.sess.commit()
        eq_(f1.id, f2.id - 1)

    def test_family_role_when_invalid(self):
        try:
            f2 = ex.Person(name_first='f1', name_last='l1', family_role='foobar', nullable_but_required='f')
            ex.sess.add(f2)
            ex.sess.commit()
            assert False, 'should have been an exception'
        except ValidationError as e:
            assert f2.validation_errors['family_role'][0].startswith('Value must be one of: father; mother; child')

    def test_first_name_is_too_long(self):
        try:
            f2 = ex.Person(name_first='f1'*50, name_last='l1', family_role='father', nullable_but_required='f')
            ex.sess.add(f2)
            ex.sess.commit()
            assert False, 'should have been an exception'
        except ValidationError as e:
            assert f2.validation_errors['name_first'][0] == 'Enter a value less than 75 characters long'

    def test_nullable_but_required(self):
        # the validator must reject both an explicit None and an omitted value
        # set to None
        try:
            f2 = ex.Person(name_first='f1', name_last='l1', family_role='father', nullable_but_required=None)
            ex.sess.add(f2)
            ex.sess.commit()
            assert False, 'should have been an exception'
        except ValidationError as e:
            ex.sess.rollback()
            expect = {'nullable_but_required': ['Please enter a value']}
            eq_(f2.validation_errors, expect)
        # not given
        try:
            f2 = ex.Person(name_first='f1', name_last='l1', family_role='father')
            ex.sess.add(f2)
            ex.sess.commit()
            assert False, 'should have been an exception'
        except ValidationError as e:
            expect = {'nullable_but_required': ['Please enter a value']}
            eq_(f2.validation_errors, expect)

    @mock.patch('savalidation.tests.examples.Person.get')
    def test_before_flush_decorator_and_mocked_methods(self, m_get):
        # patching a model method must not make the flush-time hooks call it
        p = ex.Person(name_first='f1', name_last='l1', family_role='father', nullable_but_required='a')
        ex.sess.add(p)
        ex.sess.commit()
        eq_(m_get.call_count, 0)
class TestTypes(object):
    """Type-coercion validators: integer, numeric, and date/time columns."""

    def tearDown(self):
        # need this to clear the session after the exception catching below
        ex.sess.rollback()

    def test_integer(self):
        inst = ex.IntegerType(fld=10)
        # this None helps test "missing" verse "value not entered"
        inst.fld2 = None
        ex.sess.add(inst)
        ex.sess.commit()
        inst = ex.IntegerType(fld='5')
        ex.sess.add(inst)
        ex.sess.commit()
        try:
            inst = ex.IntegerType(fld='ten', fld2='ten', fld3='ten')
            ex.sess.add(inst)
            ex.sess.commit()
            assert False, 'expected exception'
        except ValidationError as e:
            expect = {'fld': ['Please enter an integer value'], 'fld2': ['Please enter an integer value'], 'fld3': ['Please enter an integer value']}
            eq_(inst.validation_errors, expect)

    def test_numeric(self):
        inst = ex.NumericType(fld=10.5)
        ex.sess.add(inst)
        ex.sess.commit()
        inst = ex.NumericType(fld='10.5')
        ex.sess.add(inst)
        ex.sess.commit()
        try:
            inst = ex.NumericType(fld='ten dot five', fld2='ten dot five')
            ex.sess.add(inst)
            ex.sess.commit()
            assert False, 'expected exception'
        except ValidationError as e:
            expect = {'fld': ['Please enter a number'], 'fld2': ['Please enter a number']}
            eq_(inst.validation_errors, expect)

    def test_date_time(self):
        inst = ex.DateTimeType(fld='9/23/2010', fld3='10:25:33 am', fld2='2010-09-26 10:47:35 pm')
        ex.sess.add(inst)
        ex.sess.commit()
        try:
            inst = ex.DateTimeType(fld='foo', fld3='bar', fld2='baz')
            ex.sess.add(inst)
            ex.sess.commit()
            assert False, 'expected exception'
        except ValidationError as e:
            expect = {'fld2': ['Unknown date/time string "baz"'], 'fld': ['Please enter the date in the form mm/dd/yyyy'], 'fld3': ['You must enter minutes (after a :)']}
            # NOTE(review): the pprint calls below look like leftover debugging;
            # this branch never asserts anything. Consider restoring
            # eq_(inst.validation_errors, expect) — possibly disabled because the
            # exact messages vary across validator-library versions; confirm.
            import pprint
            pprint.pprint(inst.validation_errors)
            pprint.pprint(expect)
class TestOrders(object):
    """Foreign-key validation on Order/Order2 rows referencing Customer."""

    def tearDown(self):
        # fully reset all three tables touched by these tests
        ex.sess.remove()
        ex.sess.execute('DELETE FROM %s' % ex.Order.__table__)
        ex.sess.execute('DELETE FROM %s' % ex.Order2.__table__)
        ex.sess.execute('DELETE FROM %s' % ex.Customer.__table__)
        ex.sess.commit()

    def test_with_id(self):
        c = ex.Customer(name='ts1')
        ex.sess.add(c)
        # flush (not commit) so c.id is assigned before building the Order
        ex.sess.flush()
        o = ex.Order(customer_id = c.id)
        ex.sess.add(o)
        ex.sess.commit()
        assert o.customer is c

    def test_with_reference(self):
        c1 = ex.Customer(name='ts1')
        o = ex.Order(customer = c1)
        ex.sess.add(o)
        ex.sess.add(c1)
        ex.sess.commit()
        # NOTE(review): assertion disabled — re-enable or document why.
        #assert o.customer is c1

    def test_fk_type_checking(self):
        o = ex.Order(customer_id = 'foobar')
        ex.sess.add(o)
        try:
            ex.sess.commit()
            assert False
        except ValidationError as e:
            ex.sess.rollback()
            expect = {'customer_id': ['Please enter an integer value']}
            eq_(o.validation_errors, expect)

    def test_fk_not_null_checking(self):
        o = ex.Order()
        ex.sess.add(o)
        try:
            ex.sess.commit()
            assert False
        except ValidationError as e:
            ex.sess.rollback()
            expect = {'customer_id': ['Please enter a value']}
            eq_(o.validation_errors, expect)

    def test_order2_with_reference(self):
        c1 = ex.Customer(name='ts1')
        o = ex.Order2(customer = c1)
        ex.sess.add(o)
        ex.sess.add(c1)
        ex.sess.commit()
        assert o.customer is c1

    def test_order2_with_none(self):
        o = ex.Order2()
        ex.sess.add(o)
        try:
            ex.sess.commit()
            assert False
        except ValidationError as e:
            ex.sess.rollback()
            expect = {'customer_id': ['Please enter a value']}
            eq_(o.validation_errors, expect)

    def test_text_type(self):
        c = ex.Customer(name='ts1')
        # note is a Text column type, it needs to have a value for this test
        ex.Order(customer=c, note='foo')
        ex.sess.add(c)
        ex.sess.commit()
        # passing without an exception is the goal here. We used to have a bug whe using a Text
        # column type that would fail validation.
class TestUnit(object):
    """Checks _sav_column_names() on both an instance and the class itself."""

    # the exact column list the model is expected to expose
    expected_cols = ['id', 'createdts', 'updatedts', 'name', 'reg_num', 'status']

    def tearDown(self):
        # clear leftover session state / Family rows from each test
        ex.sess.rollback()
        ex.sess.execute('DELETE FROM %s' % ex.Family.__table__)
        ex.sess.commit()

    def test_inst_col_names(self):
        family = ex.Family(name='f1', reg_num=1)
        eq_(family._sav_column_names(), self.expected_cols)

    def test_class_col_names(self):
        eq_(ex.Family._sav_column_names(), self.expected_cols)
class TestMixin(object):
    """Models that skip the declarative mixin must still persist correctly,
    whether or not they go through a kwarg-based constructor."""

    def tearDown(self):
        ex.sess.rollback()
        ex.sess.execute('DELETE FROM %s' % ex.NoMixin.__table__)
        ex.sess.commit()

    def _reload_first(self):
        # detach everything, then read the row back through a fresh session
        ex.sess.remove()
        return ex.sess.query(ex.NoMixin).first()

    def test_no_constructor(self):
        record = ex.NoMixin()
        record.name = 'tnc'
        ex.sess.add(record)
        ex.sess.commit()
        record = self._reload_first()
        assert record.name == 'tnc', record.name

    def test_with_constructor(self):
        record = ex.NoMixin(name='tnc')
        ex.sess.add(record)
        ex.sess.commit()
        record = self._reload_first()
        assert record.name == 'tnc', record.name
class TestConversions(object):
    """Value-conversion validators declared via factory or kwarg on
    ex.ConversionTester."""

    @classmethod
    def teardown_class(self):
        # NOTE(review): conventionally the first classmethod parameter is `cls`
        ex.sess.rollback()
        ex.sess.remove()

    def setUp(self):
        # need this to clear the session after the exception catching below
        ex.sess.rollback()
        ex.sess.execute('DELETE FROM %s' % ex.ConversionTester.__table__)
        ex.sess.commit()

    def test_ok(self):
        e1 = ex.ConversionTester(val1='foo')
        ex.sess.add(e1)
        ex.sess.commit()
        ex.sess.remove()
        e1 = ex.sess.query(ex.ConversionTester).first()
        eq_(e1.val1, 'foo')

    def test_conversion_from_factory(self):
        # val3's converter reverses the stored string
        e1 = ex.ConversionTester(val3='foo')
        ex.sess.add(e1)
        ex.sess.commit()
        ex.sess.remove()
        e1 = ex.sess.query(ex.ConversionTester).first()
        eq_(e1.val3, 'oof')

    def test_conversion_from_factory_with_override(self):
        # val4 overrides the factory conversion, so the value round-trips as-is
        e1 = ex.ConversionTester(val4='foo')
        ex.sess.add(e1)
        ex.sess.commit()
        ex.sess.remove()
        e1 = ex.sess.query(ex.ConversionTester).first()
        eq_(e1.val4, 'foo')

    def test_conversion_from_kwarg(self):
        e1 = ex.ConversionTester(val2='foo')
        ex.sess.add(e1)
        ex.sess.commit()
        ex.sess.remove()
        e1 = ex.sess.query(ex.ConversionTester).first()
        eq_(e1.val2, 'oof')

    def test_validation_failure(self):
        try:
            e1 = ex.ConversionTester(val3=2)
            ex.sess.add(e1)
            ex.sess.commit()
            assert False
        except ValidationError as e:
            eq_(len(e.invalid_instances), 1)
            expect = {'val3': ["Must be a string type"]}
            eq_(e1.validation_errors, expect)
| {
"content_hash": "c34cebeb61de045589b6119b9b6fa876",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 170,
"avg_line_length": 35.04545454545455,
"alnum_prop": 0.5611987166359479,
"repo_name": "marquisthunder/sqlalchemy-validation",
"id": "994b6f7ed308c7a6e27915ac8b8a8796a81dfa23",
"size": "14649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savalidation/tests/test_examples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57463"
}
],
"symlink_target": ""
} |
from django import forms
from confucius.models import Review
class ProblemForm(forms.Form):
    """Free-text form used to report a problem with a paper or an assignment."""
    problem = forms.CharField(label=u'Enter your explanation', help_text=u'Please indicate in a concise way the exact problem with the paper or the assignment.',
min_length=12, widget=forms.Textarea(attrs={'class': 'input-xlarge'}))
class ReviewForm(forms.ModelForm):
    """Review submission form; the reviewer-confidence field is only shown
    when the caller enables it via the `enable_reviewer_confidence` kwarg."""
    confidence_choice = (('1', 'Null'), ('2', 'Low'), ('3', 'Medium'), ('4', 'High'), ('5', 'Expert'))
    reviewer_confidence = forms.TypedChoiceField(coerce=int, choices=confidence_choice, widget=forms.RadioSelect)

    class Meta:
        model = Review
        fields = ( 'detailed_commentary', 'commentary_for_president', )

    def __init__(self, *args, **kwargs):
        # pop our custom flag before ModelForm sees the kwargs
        enable_reviewer_confidence = kwargs.pop('enable_reviewer_confidence', None)
        super(ReviewForm, self).__init__(*args, **kwargs)
        # `instance` is still in kwargs here: super() reads it without consuming it
        review = kwargs.pop('instance', None)
        self.fields["reviewer_confidence"].initial = '0'
        if review is not None:
            # editing an existing review: pre-fill with its stored confidence
            self.fields["reviewer_confidence"].initial = review.reviewer_confidence
        if not enable_reviewer_confidence:
            del self.fields["reviewer_confidence"]

    def save(self, **kwargs):
        """Persist the review; overall_evaluation is supplied by the caller,
        and reviewer_confidence defaults to 0 when the field is disabled."""
        review = super(ReviewForm, self).save(commit=False)
        evaluation = kwargs.pop('overall_evaluation', None)
        enable_reviewer_confidence = kwargs.pop('enable_reviewer_confidence', None)
        review.overall_evaluation = evaluation
        if enable_reviewer_confidence:
            review.reviewer_confidence = self.cleaned_data['reviewer_confidence']
        else:
            review.reviewer_confidence = 0
        review.save()
        return review

    def clean(self):
        # reject submissions outside the conference's review window
        cleaned_data = super(ReviewForm, self).clean()
        instance = self.instance
        conference = instance.get_assignment().conference
        if conference.are_reviews_over == True:
            raise forms.ValidationError('The Reviews are over for now.')
        if conference.are_reviews_notstarted == True:
            raise forms.ValidationError('The Reviews are not started.')
        return cleaned_data
| {
"content_hash": "9016c29ed8c6e59e31d2ea397cb5c6b2",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 161,
"avg_line_length": 37.43103448275862,
"alnum_prop": 0.6499309074159374,
"repo_name": "jfouca/confucius",
"id": "ef233d5c02efee98d79665cc6a16f652dbfbce8e",
"size": "2171",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "confucius/forms/review.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Build one branch per case; switch() routes the real tensor only into the
  # branch whose index matches sel, and merge() picks the live branch back out.
  branches = []
  for case in range(num_cases):
    routed = control_flow_ops.switch(x, tf.equal(sel, case))[1]
    branches.append(func(routed, case))
  return control_flow_ops.merge(branches)[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
  """Distort the color of a Tensor image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather then adding that level of complication, we select a distinct ordering
  of color ops for each preprocessing thread.

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0-3).
    fast_mode: Avoids slower ops (random_hue and random_contrast)
    scope: Optional scope for name_scope.
  Returns:
    3-D Tensor color-distorted image on range [0, 1]
  Raises:
    ValueError: if color_ordering not in [0, 3]
  """
  with tf.name_scope(scope, 'distort_color', [image]):
    if fast_mode:
      # Fast mode uses only brightness and saturation; orderings 1-3 all
      # collapse to the same (saturation-then-brightness) path.
      if color_ordering == 0:
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      else:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
    else:
      # Full mode applies all four ops in a distinct, ordering-dependent
      # sequence per color_ordering value.
      if color_ordering == 0:
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      elif color_ordering == 1:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
      elif color_ordering == 2:
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      elif color_ordering == 3:
        image = tf.image.random_hue(image, max_delta=0.2)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
      else:
        raise ValueError('color_ordering must be in [0, 3]')
    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates cropped_image using a one of the bboxes randomly distorted.

  See `tf.image.sample_distorted_bounding_box` for more documentation.

  Args:
    image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged
      as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
      image.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
      area of the image must contain at least this fraction of any bounding box
      supplied.
    aspect_ratio_range: An optional list of `floats`. The cropped area of the
      image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within in this range.
    max_attempts: An optional `int`. Number of attempts at generating a cropped
      region of the image of the specified constraints. After `max_attempts`
      failures, return the entire image.
    scope: Optional scope for name_scope.
  Returns:
    A tuple, a 3-D Tensor cropped_image and the distorted bbox
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
    # Sample a random crop window that is a distorted version of one of the
    # human-annotated boxes (or of the whole image when no box is supplied),
    # constrained by the aspect-ratio / area / coverage arguments.
    bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    # Crop the image to the sampled window.
    return tf.slice(image, bbox_begin, bbox_size), distort_bbox
def preprocess_for_train(image, height, width, bbox,
                         fast_mode=True,
                         scope=None):
  """Distort one image for training a network.

  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not effect the label.

  Additionally it would create image_summaries to display the different
  transformations applied to the image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is largest positive representable number for
      int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged
      as [ymin, xmin, ymax, xmax].
    fast_mode: Optional boolean, if True avoids slower transformations (i.e.
      bi-cubic resizing, random_hue or random_contrast).
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of distorted image used for training with range [-1, 1].
  """
  with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
    if bbox is None:
      # No annotation supplied: treat the whole image as the bounding box.
      bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                         dtype=tf.float32,
                         shape=[1, 1, 4])
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Each bounding box has shape [1, num_boxes, box coords] and
    # the coordinates are ordered [ymin, xmin, ymax, xmax].
    image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                  bbox)
    tf.summary.image('image_with_bounding_boxes', image_with_box)
    distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
    # Restore the shape since the dynamic slice based upon the bbox_size loses
    # the third dimension.
    distorted_image.set_shape([None, None, 3])
    image_with_distorted_box = tf.image.draw_bounding_boxes(
        tf.expand_dims(image, 0), distorted_bbox)
    tf.summary.image('images_with_distorted_bounding_box',
                     image_with_distorted_box)
    # This resizing operation may distort the images because the aspect
    # ratio is not respected. We select a resize method in a round robin
    # fashion based on the thread number.
    # Note that ResizeMethod contains 4 enumerated resizing methods.
    # We select only 1 case for fast_mode bilinear.
    num_resize_cases = 1 if fast_mode else 4
    distorted_image = apply_with_random_selector(
        distorted_image,
        lambda x, method: tf.image.resize_images(x, [height, width], method),
        num_cases=num_resize_cases)
    tf.summary.image('cropped_resized_image',
                     tf.expand_dims(distorted_image, 0))
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Randomly distort the colors. There are 4 ways to do it.
    distorted_image = apply_with_random_selector(
        distorted_image,
        lambda x, ordering: distort_color(x, ordering, fast_mode),
        num_cases=4)
    tf.summary.image('final_distorted_image',
                     tf.expand_dims(distorted_image, 0))
    # Rescale pixel values from [0, 1] to the network's expected [-1, 1].
    distorted_image = tf.subtract(distorted_image, 0.5)
    distorted_image = tf.multiply(distorted_image, 2.0)
    return distorted_image
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.

  If height and width are specified it would output an image with that size by
  applying resize_bilinear.

  If central_fraction is specified it would crop the central fraction of the
  input image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is largest positive representable number for
      int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if central_fraction:
      # Keep only the central region (87.5% of the area by default).
      image = tf.image.central_crop(image, central_fraction=central_fraction)
    if height and width:
      # Bilinearly resize to the requested spatial dimensions.
      batched = tf.expand_dims(image, 0)
      batched = tf.image.resize_bilinear(batched, [height, width],
                                         align_corners=False)
      image = tf.squeeze(batched, [0])
    # Rescale pixel values from [0, 1] to the network's expected [-1, 1].
    return tf.multiply(tf.subtract(image, 0.5), 2.0)
def preprocess_image(image, height, width,
                     is_training=False,
                     bbox=None,
                     fast_mode=True):
  """Pre-process one image for training or evaluation.

  Args:
    image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it would converted
      to tf.float32 assuming that the range is [0, MAX], where MAX is largest
      positive representable number for int(8/16/32) data type (see
      `tf.image.convert_image_dtype` for details).
    height: integer, image expected height.
    width: integer, image expected width.
    is_training: Boolean. If true it would transform an image for train,
      otherwise it would transform it for evaluation.
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    fast_mode: Optional boolean, if True avoids slower transformations.

  Returns:
    3-D float Tensor containing an appropriately scaled image

  Raises:
    ValueError: if user does not provide bounding box
  """
  # Delegate to the train-time (distortion) or eval-time (crop/resize) path.
  if not is_training:
    return preprocess_for_eval(image, height, width)
  return preprocess_for_train(image, height, width, bbox, fast_mode)
| {
"content_hash": "7801ece2bf220b9c200f12a6a6192e68",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 80,
"avg_line_length": 44.08843537414966,
"alnum_prop": 0.6673352877642339,
"repo_name": "AlgoHunt/nerual_style_transfer",
"id": "45e81fd8cf4a7bcfd59a233d0bb27b833b835a17",
"size": "13651",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "preprocessing/inception_preprocessing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "421727"
},
{
"name": "Python",
"bytes": "479856"
},
{
"name": "Shell",
"bytes": "7427"
}
],
"symlink_target": ""
} |
# Settings overlay for the admin-site Django application; extends the shared
# base settings imported below.
from ..base import *  # noqa
# Expose Django's built-in admin plus the project's admin_site app.
INSTALLED_APPS += [
    'django.contrib.admin',
    'apps.admin_site',
]
ROOT_URLCONF = 'apps.admin_site.urls'
# The admin site authenticates against its dedicated user model.
AUTH_USER_MODEL = 'core.AdminUser'
| {
"content_hash": "5237199173d3257ebb2728acb9727956",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 37,
"avg_line_length": 16.272727272727273,
"alnum_prop": 0.6480446927374302,
"repo_name": "thnee/django-template",
"id": "4b8fc43c2ad8f20139d353e364387ffc1f6b850f",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/settings/apps/admin_site.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17169"
}
],
"symlink_target": ""
} |
import datetime
from decimal import Decimal
from django.contrib import admin
from django.contrib.auth.models import User as DjangoUser
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.contrib.localflavor.us.models import USStateField, PhoneNumberField
from lebay.apps.base.models import BaseModel
from lebay.apps.lebay.constants import AUCTION_ITEM_CATEGORY_CHOICES, AUCTION_ITEM_STATUS_CHOICES, AUCTION_ITEM_CATEGORY_GENERAL, AUCTION_ITEM_STATUS_IDLE, AUCTION_ITEM_CONDITION_CHOICES, AUCTION_EVENT_SHIPPING_CHOICES, SALES_PAYMENT_STATUS_CHOICES, SALES_PAYMENT_STATUS_PROCESSING, AUCTION_ITEM_STATUS_RUNNING, AUCTION_EVENT_SHIPPING_USPS
class User(DjangoUser):
    """Auction-site user: extends Django's built-in user with postal
    address and phone contact details."""
    address_line_1 = models.CharField(max_length=100)
    address_line_2 = models.CharField(max_length=100, blank=True)
    city = models.CharField(max_length=50)
    state = USStateField()
    zipcode = models.CharField(max_length=10)
    phone = PhoneNumberField()

    def __unicode__(self):
        return u'%s %s' % (self.first_name, self.last_name)

    def is_seller(self):
        """Return True if this user has an associated Seller profile.

        Relies on the reverse one-to-one accessor ``self.seller`` raising
        ObjectDoesNotExist when no Seller row exists.
        """
        # BUG FIX: original used the Python-2-only `except X, e` syntax
        # (a SyntaxError on Python 3) and never used the bound exception.
        # `except X as e` works on Python 2.6+ and 3.x alike.
        try:
            self.seller
        except ObjectDoesNotExist:
            return False
        return True
class ItemCategory(BaseModel):
    """Hierarchical category for auction items; ``parent`` is NULL for
    root-level categories."""
    title = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    parent = models.ForeignKey('self', blank=True, null=True)

    def __unicode__(self):
        return u'%s' % self.title
class Seller(BaseModel):
    """Seller profile attached one-to-one to a User, holding payment
    details and default shipping preferences for new listings."""
    user = models.OneToOneField(User, related_name='seller')
    paypal_email = models.EmailField()
    default_shipping_method = models.IntegerField(choices=AUCTION_EVENT_SHIPPING_CHOICES, default=AUCTION_EVENT_SHIPPING_USPS)
    default_shipping_detail = models.CharField(max_length=100, blank=True, null=True)
    default_payment_detail = models.CharField(max_length=200, blank=True, null=True)

    def __unicode__(self):
        return u'Seller profile of %s' % self.user.username
class Item(BaseModel):
    """An item owned by a seller that can be listed for auction."""
    title = models.CharField(max_length=200)
    description = models.TextField(blank=True)
    condition = models.IntegerField(choices=AUCTION_ITEM_CONDITION_CHOICES)
    seller = models.ForeignKey(User, related_name='auction_items')
    category = models.ForeignKey(ItemCategory, related_name='auction_items')
    status = models.IntegerField(choices=AUCTION_ITEM_STATUS_CHOICES, default=AUCTION_ITEM_STATUS_IDLE)

    def __unicode__(self):
        return u'%s' % self.title

    def get_condition(self):
        """Human-readable condition label, or 'N/A' for unknown values."""
        return dict(AUCTION_ITEM_CONDITION_CHOICES).get(self.condition, 'N/A')

    def get_status(self):
        """Human-readable status label, or 'N/A' for unknown values."""
        return dict(AUCTION_ITEM_STATUS_CHOICES).get(self.status, 'N/A')
class AuctionEventManager(models.Manager):
    """Custom manager adding queries over live auctions."""

    def get_current_auctions(self):
        """Return auctions whose item is marked running and whose
        start/end window contains the current moment."""
        current_time = datetime.datetime.now()
        return self.filter(item__status=AUCTION_ITEM_STATUS_RUNNING, start_time__lt=current_time, end_time__gt=current_time)
class AuctionEvent(BaseModel):
    """A single listing of an Item for auction: timing window, pricing,
    shipping details and (eventually) the winning bidder."""
    item = models.ForeignKey(Item, related_name='auction_events')
    shipping_method = models.IntegerField(choices=AUCTION_EVENT_SHIPPING_CHOICES)
    shipping_detail = models.CharField(max_length=100, blank=True)
    payment_detail = models.CharField(max_length=200, blank=True)
    start_time = models.DateTimeField(help_text=u'Format (Hour & Minute are optional): 10/25/2006 14:30')
    end_time = models.DateTimeField(help_text=u'Format (Hour & Minute are optional): 10/25/2006 14:30')
    starting_price = models.DecimalField(default=Decimal('0.00'), max_digits=5, decimal_places=2)
    shipping_fee = models.DecimalField(default=Decimal('0.00'), max_digits=5, decimal_places=2)
    reserve_price = models.DecimalField(default=Decimal('0.00'), blank=True, max_digits=5, decimal_places=2)
    winning_bidder = models.ForeignKey(User, related_name='won_auctions', blank=True, null=True)

    objects = AuctionEventManager()

    def __unicode__(self):
        return u'%s listed on %s' % (self.item.title, self.start_time)

    def has_started(self):
        """True once the start time has passed."""
        return datetime.datetime.now() >= self.start_time

    def has_ended(self):
        """True once the end time has passed."""
        return datetime.datetime.now() >= self.end_time

    def is_running(self):
        """True while now is within [start, end) and the item status is
        RUNNING."""
        return self.has_started() and not self.has_ended() and self.item.status == AUCTION_ITEM_STATUS_RUNNING

    def get_shipping_method(self):
        """Human-readable shipping method, or 'N/A' for unknown values."""
        return dict(AUCTION_EVENT_SHIPPING_CHOICES).get(int(self.shipping_method), 'N/A')

    def get_current_price(self):
        """Highest bid amount, or the starting price when there are no
        bids yet."""
        current_price = self.starting_price
        if self.bids.count():
            current_price = self.bids.order_by('-amount')[0].amount
        return current_price

    def get_time_until_end(self):
        """Remaining time as a human-readable string such as
        '2 days 3 hours'; returns '0 seconds' once the auction is over."""
        delta = self.end_time - datetime.datetime.now()
        if delta.days < 0:
            return '0 seconds'
        # BUG FIX: use integer arithmetic (divmod) throughout. Under
        # Python 3 the original true divisions (`delta.seconds / 3600`)
        # produced floats like '1.5 hours'; divmod matches the Python 2
        # integer-division behaviour on both versions. The joined result
        # also no longer carries the original trailing space.
        weeks, days = divmod(delta.days, 7)
        hours, remainder = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        parts = []
        if weeks:
            parts.append('%s weeks' % weeks)
        if days:
            parts.append('%s days' % days)
        if hours:
            parts.append('%s hours' % hours)
        if minutes:
            parts.append('%s minutes' % minutes)
        if seconds:
            parts.append('%s seconds' % seconds)
        return ' '.join(parts)

    def is_paid(self):
        """True if at least one Sales record exists for this auction."""
        return self.sales.count() > 0

    def get_payment_status(self):
        """Payment-status label of the most recent sale, or 'Unpaid'."""
        if self.is_paid():
            latest = self.sales.order_by('-time_created')[0]
            return dict(SALES_PAYMENT_STATUS_CHOICES).get(latest.payment_status)
        return 'Unpaid'
class Sales(BaseModel):
    """Payment record (invoice) for an auction event."""
    auction_event = models.ForeignKey(AuctionEvent, related_name='sales')
    payment_status = models.IntegerField(choices=SALES_PAYMENT_STATUS_CHOICES, default=SALES_PAYMENT_STATUS_PROCESSING)
    invoice_number = models.CharField(max_length=200, unique=True)

    def __unicode__(self):
        return u'Invoice for %s' % self.auction_event
class Bid(BaseModel):
    """A single bid placed by a user on an auction event."""
    auction_event = models.ForeignKey(AuctionEvent, related_name='bids')
    bidder = models.ForeignKey(User, related_name='bids')
    amount = models.DecimalField(default=Decimal('0.00'), max_digits=5, decimal_places=2, help_text=u'All bids are final. Price in US dollars.')

    def __unicode__(self):
        return u'Placed on %s by %s' % (self.auction_event.item.title, self.bidder.username)
# Expose the auction models in the Django admin interface.
for _model in (AuctionEvent, Bid, Item, ItemCategory, Seller, Sales):
    admin.site.register(_model)
admin.site.register(User) | {
"content_hash": "aec3fafc03222b25ef387bfdbc293682",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 339,
"avg_line_length": 41.8109756097561,
"alnum_prop": 0.6737640367507657,
"repo_name": "tarequeh/little-ebay",
"id": "10a8a0627b05c1a057ab88f24464382f9376ee4f",
"size": "6857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lebay/apps/lebay/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import pqb
from ... import types
from . import exceptions
from . import utils
import uuid
class EdgeDriver(types.BaseEdgeDriver):
    """CRUD operations for OrientDB edges, issued as SQL built with pqb
    query builders and executed through ``self.driver``."""

    def _vertex_subquery(self, ref):
        """Resolve *ref* to a ``(SELECT ...)`` subquery string for a vertex.

        *ref* may be a dict with an optional 'class' key (default 'V') and
        a 'uuid' key, or a bare UUID4 string (class defaults to 'V').

        Raises:
            ValueError: if *ref* is neither a dict nor a valid UUID4 string.
        """
        if isinstance(ref, dict):
            v_class, v_uuid = ref.get('class', 'V'), ref.get('uuid')
        elif utils.validate_uuid4(ref):
            v_class, v_uuid = 'V', ref
        else:
            raise ValueError('Unrecognizable Vertex Reference [%s]' % ref)
        sub = pqb.Select().from_(v_class).where({'uuid': v_uuid}).result()
        return "(%s)" % sub

    def create(self, from_, to, data=None):
        """Create an edge between two vertices and return the stored row.

        *data* may carry a 'class' key selecting the edge class (default
        'E'); a fresh uuid/suid pair is generated for the new edge.
        """
        # BUG FIX: mutable default argument ({}) replaced with None.
        data = {} if data is None else data
        eType = data.get('class', 'E')
        # The duplicated from_/to subquery construction is factored into
        # _vertex_subquery.
        from_ = self._vertex_subquery(from_)
        to = self._vertex_subquery(to)

        QB = pqb.Create('EDGE').class_(eType).set(data).from_(from_).to(to)
        uid = uuid.uuid4()
        QB.set('uuid', str(uid))
        QB.set('suid', "%x" % (uid.fields[0]))
        QB.set('type', 'edge')
        QB.set('class', eType)

        response = self.driver.query(QB.result())
        return response[0]

    def update(self, criteria=None, data=None):
        """Update edges matching *criteria* with *data*; returns the first
        response row."""
        criteria = {} if criteria is None else criteria
        data = {} if data is None else data
        eType = criteria.get('class', 'E')
        SQL = pqb.Update(eType).set(data).where(criteria).result()
        response = self.driver.query(SQL)
        return response[0]

    def delete(self, eType, criteria=None):
        """Delete edges matching *criteria*; returns the first response row.

        NOTE(review): the *eType* parameter is immediately overwritten by
        criteria's 'class' entry (default 'E'), so it has no effect.
        Preserved as-is for interface compatibility — confirm the intent.
        """
        criteria = {} if criteria is None else criteria
        eType = criteria.get('class', 'E')
        SQL = pqb.Delete('EDGE').class_(eType).where(criteria).result()
        response = self.driver.query(SQL)
        return response[0]

    def find(self, criteria=None, **kwargs):
        """Find edges matching *criteria*; the optional 'depth' kwarg is
        forwarded to the driver as the fetch depth (default 0)."""
        criteria = {} if criteria is None else criteria
        depth = kwargs.get('depth', 0)
        eType = criteria.get('class', 'E')
        SQL = pqb.Select().from_(eType).where(criteria).result()
        return self.driver.query(SQL, depth=depth)
"content_hash": "aca9b966a2c2119e66cbb4e57c23f534",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 33.38461538461539,
"alnum_prop": 0.5350230414746544,
"repo_name": "josegomezr/graph_db",
"id": "040cdd54a78e364d382927195f9afe6266baa858",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph_db/driver/orientdb/edge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25304"
}
],
"symlink_target": ""
} |
'''@file cross_enthropytrainer_rec.py
contains the CrossEnthropyTrainerRec for reconstruction of the audio samples'''
import tensorflow as tf
from nabu.neuralnetworks.trainers import trainer
from nabu.neuralnetworks import ops
class JointFeaturesTextCost(trainer.Trainer):
    '''A trainer that minimises a combined cross-entropy (text) and MSE
    (feature reconstruction) loss; the output sequences must be of the same
    length as the input sequences.'''

    def compute_loss(self, targets, logits, logit_seq_length,
                     target_seq_length):
        '''
        Compute the loss

        Creates the operation to compute the combined cross-entropy / MSE
        loss for every input frame (if you want to have a different loss
        function, overwrite this method)

        Args:
            targets: a tuple of targets, the first one being a
                [batch_size, max_target_length] tensor containing the real
                targets, the second one being [batch_size, max_audioseq_length]
                tensor containing the audio samples or other extra information.
            logits: a tuple of [batch_size, max_logit_length, dim] tensors
                containing the logits for the text and the audio samples
            logit_seq_length: the length of all the logit sequences as a tuple
                of [batch_size] vectors
            target_seq_length: the length of all the target sequences as a
                tuple of two [batch_size] vectors, both for one of the elements
                in the targets tuple

        Returns:
            a scalar value containing the loss
        '''
        with tf.name_scope('cross_entropy_loss'):

            ## first process text logits and targets
            # extract the text logits out of the tuple
            text_logits = logits[0]
            text_logit_seq_length = logit_seq_length[0]
            text_targets = targets[0]
            text_target_seq_length = target_seq_length[0]

            # cross-entropy over the text outputs, with an EOS appended
            loss_text = ops.cross_entropy_integers_logits_with_appending_eos(
                text_targets, text_logits,
                text_logit_seq_length, text_target_seq_length)

            ## next process reconstruction targets and logits
            rec_targets = targets[1]
            rec_logits = logits[1]
            rec_target_length = target_seq_length[1]

            # compute the mean squared error of the reconstruction
            loss_features = ops.mse(rec_targets, rec_logits, rec_target_length)

            ## finally combine the two loss functions
            # get the tradeoff parameter: 1.0 -> pure text loss,
            # 0.0 -> pure reconstruction loss
            tradeoff = float(self.conf['loss_trade_off'])
            if tradeoff < 0 or tradeoff > 1:
                raise Exception('Trade off parameter for the loss function '
                                'should be between 0 and 1')

            # make a convex combination of the two loss functions
            loss = tradeoff*loss_text + (1-tradeoff)*loss_features

        return loss
| {
"content_hash": "152fafda28844df4a5bf4d8adf9710ff",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 40.472972972972975,
"alnum_prop": 0.6253756260434057,
"repo_name": "JeroenBosmans/nabu",
"id": "51112e5aec436b935fa16ba455fa30baf6ec59f5",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/neuralnetworks/trainers/joint_features_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "395778"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
import platform
import re
# Platform identification constants returned by platform_detect().
UNKNOWN = 0
RASPBERRY_PI = 1
BEAGLEBONE_BLACK = 2
MINNOWBOARD = 3
def platform_detect():
    """Detect the board this code is running on.

    Returns RASPBERRY_PI, BEAGLEBONE_BLACK, MINNOWBOARD, or UNKNOWN.
    """
    # Any detected Pi version means we are on a Raspberry Pi.
    if pi_version() is not None:
        return RASPBERRY_PI

    # Handle Beaglebone Black via the platform string.
    # TODO: Check the Beaglebone Black /proc/cpuinfo value instead of reading
    # the platform.
    plat = platform.platform().lower()
    bbb_markers = (
        'armv7l-with-debian',
        'armv7l-with-ubuntu',
        'armv7l-with-glibc2.4',
    )
    if any(marker in plat for marker in bbb_markers):
        return BEAGLEBONE_BLACK

    # Handle Minnowboard; assumes the mraa library is installed.
    try:
        import mraa
        if mraa.getPlatformName() == 'MinnowBoard MAX':
            return MINNOWBOARD
    except ImportError:
        pass

    # Couldn't figure out the platform, just return unknown.
    return UNKNOWN
def pi_revision():
    """Detect the revision number of a Raspberry Pi, useful for changing
    functionality like default I2C bus based on revision.

    Returns:
        1 for early boards (revision codes 0000/0002/0003), 2 otherwise.

    Raises:
        RuntimeError: if no revision line is found in /proc/cpuinfo.
    """
    # Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
    with open('/proc/cpuinfo', 'r') as infile:
        for line in infile:
            # Match a line of the form "Revision : 0002" while ignoring extra
            # info in front of the revision (like 1000 when the Pi was over-volted).
            # BUG FIX: raw string — '\s'/'\w' in a plain string literal are
            # invalid escape sequences (SyntaxWarning on modern Python).
            match = re.match(r'Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE)
            if match and match.group(1) in ['0000', '0002', '0003']:
                # Return revision 1 if revision ends with 0000, 0002 or 0003.
                return 1
            elif match:
                # Assume revision 2 if revision ends with any other 4 chars.
                return 2
    # Couldn't find the revision, throw an exception.
    raise RuntimeError('Could not determine Raspberry Pi revision.')
def pi_version():
    """Detect the version of the Raspberry Pi.  Returns either 1, 2 or
    None depending on if it's a Raspberry Pi 1 (model A, B, A+, B+),
    Raspberry Pi 2 (model B+), or not a Raspberry Pi.
    """
    # Check /proc/cpuinfo for the Hardware field value.
    # 2708 is pi 1
    # 2709 is pi 2
    # Anything else is not a pi.
    with open('/proc/cpuinfo', 'r') as infile:
        cpuinfo = infile.read()
    # Match a line like 'Hardware   : BCM2709'
    # BUG FIX: raw string — '\s'/'\w' in a plain string literal are invalid
    # escape sequences (SyntaxWarning on modern Python).
    match = re.search(r'^Hardware\s+:\s+(\w+)$', cpuinfo,
                      flags=re.MULTILINE | re.IGNORECASE)
    if not match:
        # Couldn't find the hardware, assume it isn't a pi.
        return None
    if match.group(1) == 'BCM2708':
        # Pi 1
        return 1
    elif match.group(1) == 'BCM2709':
        # Pi 2
        return 2
    else:
        # Something else, not a pi.
        return None
| {
"content_hash": "c05c54ea10dab5eb6d553e78e5d720fe",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 94,
"avg_line_length": 35.56976744186046,
"alnum_prop": 0.6142530238640078,
"repo_name": "CardosoTech/CardosoTech_Python_GPIO",
"id": "d2ac867539411d6fa9ee719e67db5175dbf17fe6",
"size": "3059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CardosoTech_GPIO/Platform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124306"
}
],
"symlink_target": ""
} |
from model.credentials import Credentials
def test_success_login_mk(app):
    """Log in with valid credentials, exercise the MK pages, then log out.

    NOTE(review): the credentials are hard-coded; presumably test-fixture
    values — confirm they are not real secrets.
    """
    app.session.login(Credentials(login="38096000000", password="xxx"))
    app.mk.hello_mk()
    app.mk.merge_from_mk()
    app.session.logout()
| {
"content_hash": "505cd106c10a862545ef800d52bf9a57",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 71,
"avg_line_length": 27.75,
"alnum_prop": 0.7117117117117117,
"repo_name": "AlexBenyuh/python_training",
"id": "dc337456449257200f25a598a70331711c0317cc",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_success_login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5604"
}
],
"symlink_target": ""
} |
"""Constants used by multiple Tasmota modules."""
CONF_DISCOVERY_PREFIX = "discovery_prefix"
DATA_REMOVE_DISCOVER_COMPONENT = "tasmota_discover_{}"
DATA_UNSUB = "tasmota_subscriptions"
DEFAULT_PREFIX = "tasmota/discovery"
DOMAIN = "tasmota"
PLATFORMS = [
"binary_sensor",
"light",
"sensor",
"switch",
]
TASMOTA_EVENT = "tasmota_event"
| {
"content_hash": "93fd13f9e40bb956cdc0fb73b543318b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 19.77777777777778,
"alnum_prop": 0.6910112359550562,
"repo_name": "balloob/home-assistant",
"id": "0f4dfde1646061c0701d7f0134009bfd55048e0a",
"size": "356",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tasmota/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "12903869"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import client, file, tools
SCOPES = "https://www.googleapis.com/auth/forms.body"
DISCOVERY_DOC = "https://forms.googleapis.com/$discovery/rest?version=v1"
store = file.Storage('token.json')
creds = None
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secrets.json', SCOPES)
creds = tools.run_flow(flow, store)
form_service = discovery.build('forms', 'v1', http=creds.authorize(
Http()), discoveryServiceUrl=DISCOVERY_DOC, static_discovery=False)
form = {
"info": {
"title": "My new quiz",
}
}
# Creates the initial form
result = form_service.forms().create(body=form).execute()
# JSON to convert the form into a quiz
update = {
"requests": [
{
"updateSettings": {
"settings": {
"quizSettings": {
"isQuiz": True
}
},
"updateMask": "quizSettings.isQuiz"
}
}
]
}
# Converts the form into a quiz
question_setting = form_service.forms().batchUpdate(formId=result["formId"],
body=update).execute()
# Print the result to see it's now a quiz
getresult = form_service.forms().get(formId=result["formId"]).execute()
print(getresult)
# [END forms_convert_form]
| {
"content_hash": "77c3393b9c888fe733c8d3261fa554e6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 28.274509803921568,
"alnum_prop": 0.6061026352288488,
"repo_name": "gsuitedevs/python-samples",
"id": "bfb6ea8a95d985d49b72aa63b309706b173d2769",
"size": "2047",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "forms/snippets/convert_form.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "401984"
}
],
"symlink_target": ""
} |
"""
THIS WILL CLEAR YOUR DATABASE AND FILL IT WITH TEST DATA!
Prepare the local database for load testing.
It does the following:
- Creates an admin user.
- Creates 1000 channels. Create 3 users for each channel, and assign 1 as the editor and the other two as viewers.
"""
import logging
import os
import subprocess
import sys
import warnings
# output any info logs
logging.basicConfig(level="INFO")
# set sys.path to include the contentcuration dir
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
cc_dir = os.path.join(root_dir, "contentcuration")
sys.path.append(cc_dir)
# set the settings module
os.putenv("DJANGO_SETTINGS_MODULE", "contentcuration.test_settings")
from django.core.management import call_command
from contentcuration.models import Channel, User
# CONSTANTS
NUM_CHANNELS = 1000
NUM_NODES_PER_CHANNEL = 500  # NOTE(review): not referenced in this script — confirm

from contentcuration.tests.utils import mixer

# Teach mixer how to blend User objects; these fields appear to hold
# serialized JSON, so give them valid empty-object defaults.
mixer.register(
    User,
    information="{}",
    content_defaults="{}",
    policies="{}"
)

if __name__ == "__main__":
    warnings.warn("THIS WILL CLEAR YOUR DATABASE AND FILL IT WITH TEST DATA!")

    logging.info("Clearing the DB")
    call_command("flush", "--noinput")

    # set up our DB from scratch and create our user
    logging.info("Setting up the database")
    call_command("setup")

    # create NUM_CHANNELS channels using mixer:
    # one editor plus two view-only users per channel
    logging.info("Creating {} channels".format(NUM_CHANNELS))
    for _ in range(NUM_CHANNELS):
        editor = mixer.blend(User)
        c = mixer.blend(Channel)
        c.editors.add(editor)
        viewers = mixer.cycle(2).blend(User)
        for v in viewers:
            v.view_only_channels.add(c)
            v.save()
        c.save()

    # launch the server (NOTE(review): the original comment said "prod
    # mode", but this invokes the devserver task — confirm which is intended)
    subprocess.call(["yarn", "run", "devserver"])
| {
"content_hash": "2bd1bdc89748973b364f9ad7cd3829c8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 114,
"avg_line_length": 26.391304347826086,
"alnum_prop": 0.6897309170785283,
"repo_name": "DXCanas/content-curation",
"id": "f97e1a72e6b7e9c321bec66ae546ac1ba7d5cb0d",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "performance/prep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173955"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "HTML",
"bytes": "503467"
},
{
"name": "JavaScript",
"bytes": "601189"
},
{
"name": "Makefile",
"bytes": "3409"
},
{
"name": "Python",
"bytes": "813881"
},
{
"name": "Shell",
"bytes": "6970"
},
{
"name": "Smarty",
"bytes": "6584"
},
{
"name": "Vue",
"bytes": "21539"
}
],
"symlink_target": ""
} |
"""
Read a result info files and filter based on a predicate
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
add_KleeRunner_to_module_search_path()
from KleeRunner import ResultInfo
import argparse
import logging
import os
import pprint
import re
import sys
import yaml
_logger = None
def main(args):
    """Filter a result info file by a user-supplied predicate and emit the
    surviving results as YAML.

    NOTE(review): ``args`` is accepted but ``parser.parse_args()`` reads
    ``sys.argv`` directly — preserved as-is for compatibility.
    """
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-l", "--log-level", type=str, default="info",
                        dest="log_level",
                        choices=['debug', 'info', 'warning', 'error'])
    parser.add_argument('result_info_file',
                        help='Result info file',
                        type=argparse.FileType('r'))
    parser.add_argument('predicate',
                        type=str,
                        help="python expression to evaluate on result 'r' or index 'index'")
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    pargs = parser.parse_args()

    logLevel = getattr(logging, pargs.log_level.upper(), None)
    logging.basicConfig(level=logLevel)
    _logger = logging.getLogger(__name__)

    resultInfos = ResultInfo.loadRawResultInfos(pargs.result_info_file)

    # Make a shallow copy and rebuild the result list with only the
    # entries matching the predicate.
    newResultInfos = resultInfos.copy()
    newResultInfos['results'] = []

    # SECURITY: eval() of a user-controlled expression executes arbitrary
    # code; tolerable only because this is a local analysis tool run by the
    # person supplying the expression.
    # IMPROVEMENT: the lambda is built once here instead of being re-eval'd
    # on every loop iteration as in the original.
    predicate = eval('lambda r, index: {}'.format(pargs.predicate))

    keepCount = 0
    for (index, r) in enumerate(resultInfos['results']):
        if predicate(r, index):
            _logger.debug('Keeping result "{}"'.format(r))
            newResultInfos['results'].append(r)
            keepCount += 1
        else:
            _logger.debug('Removing result "{}"'.format(r))

    # Output as YAML
    pargs.output.write('# Automatically generated result info\n')
    pargs.output.write(yaml.dump(newResultInfos, default_flow_style=False))
    _logger.info('# kept: {}'.format(keepCount))
    _logger.info('# removed: {}'.format(
        len(resultInfos['results']) - keepCount))
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| {
"content_hash": "d7f5853626d24ae5de1fe09580298bf4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 92,
"avg_line_length": 33.351351351351354,
"alnum_prop": 0.6110210696920584,
"repo_name": "delcypher/klee-runner",
"id": "654d314135429a19424ddf6eea4ff026ed271521",
"size": "2628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/result-info-filter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "491174"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import addons.base.models
from django.db import migrations, models
import django_extensions.db.fields
import osf.models.base
import osf.utils.datetime_aware_jsonfield
import osf.utils.fields
class Migration(migrations.Migration):
    """Initial schema for the GitHub addon: per-node and per-user settings.

    Auto-generated by Django's makemigrations; avoid hand-editing the
    operations list.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='NodeSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('deleted', osf.utils.fields.NonNaiveDateTimeField(blank=True, null=True)),
                ('user', models.TextField(blank=True, null=True)),
                ('repo', models.TextField(blank=True, null=True)),
                ('hook_id', models.TextField(blank=True, null=True)),
                ('hook_secret', models.TextField(blank=True, null=True)),
                ('registration_data', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict, encoder=osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONEncoder, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model, osf.models.base.QuerySetExplainMixin, addons.base.models.BaseStorageAddon),
        ),
        migrations.CreateModel(
            name='UserSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('deleted', osf.utils.fields.NonNaiveDateTimeField(blank=True, null=True)),
                ('oauth_grants', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict, encoder=osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONEncoder)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model, osf.models.base.QuerySetExplainMixin),
        ),
    ]
| {
"content_hash": "a9e9966436c999474d4ac1c7a0012418",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 203,
"avg_line_length": 52.21818181818182,
"alnum_prop": 0.6340529247910863,
"repo_name": "cslzchen/osf.io",
"id": "617fc761a88582edacd7145cbb4307d7327b1f1c",
"size": "2946",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "addons/github/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11612029"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
"""
Implementation of the OSGi LogService, based on Python standard logging
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import collections
import datetime
import logging
import sys
import time
import traceback
# Pelix
import pelix.framework
from pelix.constants import BundleActivator
from pelix.misc import (
LOG_SERVICE,
LOG_READER_SERVICE,
PROPERTY_LOG_LEVEL,
PROPERTY_LOG_MAX_ENTRIES,
)
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)

# Documentation strings format
__docformat__ = "restructuredtext en"

# Local logger
logger = logging.getLogger(__name__)

# Definition of the log levels (OSGi values).
# NOTE: OSGi numbering is inverted relative to Python's logging module:
# a higher OSGi value means a *less* severe message.
LOG_ERROR = 1
LOG_WARNING = 2
LOG_INFO = 3
LOG_DEBUG = 4

# OSGi level => Python logging level
OSGI_TO_LEVEL = {
    LOG_DEBUG: logging.DEBUG,
    LOG_INFO: logging.INFO,
    LOG_WARNING: logging.WARNING,
    LOG_ERROR: logging.ERROR,
}

# Python logging level => OSGi level
# (CRITICAL has no OSGi equivalent and is mapped onto LOG_ERROR)
LEVEL_TO_OSGI = {
    logging.DEBUG: LOG_DEBUG,
    logging.INFO: LOG_INFO,
    logging.WARNING: LOG_WARNING,
    logging.ERROR: LOG_ERROR,
    logging.CRITICAL: LOG_ERROR,
}
# ------------------------------------------------------------------------------
class LogEntry(object):
    """
    Represents a log entry: an immutable snapshot of a single log event
    (level, message, optional exception info, source bundle and service
    reference, plus the creation timestamp).
    """

    # __slots__ avoids a per-instance __dict__, since many entries may be
    # kept in memory by the log reader
    __slots__ = (
        "__bundle",
        "__exception",
        "__level",
        "__message",
        "__reference",
        "__time",
        "__record",
    )

    def __init__(self, level, message, exception, bundle, reference):
        """
        :param level: The Python log level of the entry
        :param message: A human readable message
        :param exception: The exception associated to the entry
        :param bundle: The bundle that created the entry
        :param reference: The service reference associated to the entry
        """
        self.__bundle = bundle
        self.__exception = exception
        self.__level = level
        self.__message = message
        self.__reference = reference
        # Timestamp fixed at construction time
        self.__time = time.time()
        # Cached logging.LogRecord equivalent, built on demand by to_record()
        self.__record = None

    def __str__(self):
        """
        String representation: level, date, optional bundle name, message,
        and the exception (on its own line) when present
        """
        values = [
            # 7: length of "WARNING"
            "{0: ^7} ::".format(logging.getLevelName(self.__level)),
            # Date
            str(datetime.datetime.fromtimestamp(self.__time)),
            "::",
        ]
        if self.__bundle:
            # Bundle name
            values.append(
                "{0: <20s} ::".format(self.__bundle.get_symbolic_name())
            )
        # Message
        values.append(self.__message)
        if not self.__exception:
            # Print as is
            return " ".join(values)
        # Print the exception too
        return "{0}\n{1}".format(" ".join(values), self.__exception)

    @property
    def bundle(self):
        """
        The bundle that created this entry
        """
        return self.__bundle

    @property
    def message(self):
        """
        The message associated to this entry
        """
        return self.__message

    @property
    def exception(self):
        """
        The exception associated to this entry
        """
        return self.__exception

    @property
    def level(self):
        """
        The log level of this entry (Python constant)
        """
        return self.__level

    @property
    def osgi_level(self):
        """
        The log level of this entry (OSGi constant); unmapped Python levels
        fall back to LOG_INFO
        """
        return LEVEL_TO_OSGI.get(self.__level, LOG_INFO)

    @property
    def reference(self):
        """
        The reference to the service associated to this entry
        """
        return self.__reference

    @property
    def time(self):
        """
        The timestamp of this entry
        """
        return self.__time

    def to_record(self):
        # type: () -> logging.LogRecord
        """
        Returns this object as a ``logging.LogRecord``
        """
        if self.__record is None:
            # Construct the record on demand (then cached in __record)
            self.__record = self.__make_record()
        return self.__record

    def __make_record(self):
        """
        Converts this object into a ``logging.LogRecord`` object
        """
        # Extract local details
        bundle = self.bundle
        name = bundle.get_symbolic_name()
        pathname = bundle.get_location()
        lineno = 0
        args = []
        func = "n/a"
        sinfo = None
        level = self.level
        msg = self.message
        exc_info = self.exception

        # Construct the record
        record = logging.LogRecord(
            name, level, pathname, lineno, msg, args, exc_info, func, sinfo
        )

        # Fix the time related fields so the record reflects this entry's
        # original creation time instead of the record's construction time
        log_start_time = record.created - (record.relativeCreated / 1000)
        creation_time = self.__time
        record.created = creation_time
        record.msecs = (creation_time - int(creation_time)) * 1000
        record.relativeCreated = (creation_time - log_start_time) * 1000
        return record
class LogReaderService:
    """
    The LogReader service: keeps a bounded history of log entries and
    notifies subscribed listeners of each new entry.
    """

    def __init__(self, context, max_entries):
        """
        :param context: The bundle context
        :param max_entries: Maximum stored entries
        """
        self._context = context
        # Bounded history: the deque silently drops its oldest entry once
        # max_entries is reached
        self.__logs = collections.deque(maxlen=max_entries)
        # Subscribed log listeners (objects exposing a ``logged`` method)
        self.__listeners = set()

    def add_log_listener(self, listener):
        """
        Subscribes a listener to log events.

        A log listener is an object providing with a ``logged`` method, with
        the following signature:

        .. code-block:: python

           def logged(self, log_entry):
               '''
               A log entry (LogEntry) has been added to the log service
               '''
               # ...

        :param listener: A new listener
        """
        if listener is not None:
            self.__listeners.add(listener)

    def remove_log_listener(self, listener):
        """
        Unsubscribes a listener from log events.

        :param listener: The listener to remove
        """
        self.__listeners.discard(listener)

    def get_log(self):
        """
        Returns the logs events kept by the service

        :return: A tuple of log entries
        """
        return tuple(self.__logs)

    def _store_entry(self, entry):
        """
        Stores a new log entry and notifies listeners

        :param entry: A LogEntry object
        """
        # Store the entry
        self.__logs.append(entry)

        # Notify listeners (iterate over a copy: the set may be mutated
        # by add/remove calls made during notification)
        for listener in self.__listeners.copy():
            try:
                listener.logged(entry)
            except Exception as ex:
                # Create a new log entry, without using logging nor notifying
                # listener (to avoid a recursion)
                err_entry = LogEntry(
                    logging.WARNING,
                    "Error notifying logging listener {0}: {1}".format(
                        listener, ex
                    ),
                    sys.exc_info(),
                    self._context.get_bundle(),
                    None,
                )

                # Insert the new entry before the real one (pop the real
                # entry, append the error entry, re-append the real entry)
                self.__logs.pop()
                self.__logs.append(err_entry)
                self.__logs.append(entry)
class LogServiceInstance:
    # pylint: disable=R0903
    """
    Instance of the log service given to a bundle by the factory
    """
    __slots__ = ("__reader", "__bundle")
    def __init__(self, reader, bundle):
        """
        :param reader: The Log Reader service
        :param bundle: Bundle associated to this instance
        """
        self.__reader = reader
        self.__bundle = bundle
    def log(self, level, message, exc_info=None, reference=None):
        # pylint: disable=W0212
        """
        Logs a message, possibly with an exception

        :param level: Severity of the message (Python logging level)
        :param message: Human readable message
        :param exc_info: The exception context (sys.exc_info()), if any
        :param reference: The ServiceReference associated to the log
        """
        if not isinstance(reference, pelix.framework.ServiceReference):
            # Only keep valid service references
            reference = None
        exception_str = None
        if exc_info is not None:
            # Format the exception right away to avoid memory leaks
            try:
                exception_str = "\n".join(
                    traceback.format_exception(*exc_info)
                )
            except (TypeError, ValueError, AttributeError):
                exception_str = "<Invalid exc_info>"
        # Store the LogEntry
        self.__reader._store_entry(
            LogEntry(level, message, exception_str, self.__bundle, reference)
        )
class LogServiceFactory(logging.Handler):
    """
    Log Service Factory: provides a logger per bundle, and also mirrors
    standard ``logging`` records into the log reader.
    """
    def __init__(self, context, reader, level):
        """
        :param context: The bundle context
        :param reader: The Log Reader service
        :param level: The minimal log level of this handler
        """
        logging.Handler.__init__(self, level)
        self._framework = context.get_framework()
        self._reader = reader
    def _bundle_from_module(self, module_object):
        """
        Find the bundle associated to a module

        :param module_object: A Python module object, or a module name
        :return: The Bundle object associated to the module, or None
        """
        # Accept both module objects and plain name strings
        module_name = getattr(module_object, "__name__", module_object)
        return self._framework.get_bundle_by_name(module_name)
    def emit(self, record):
        # pylint: disable=W0212
        """
        Handle a message logged with the logger

        :param record: A log record
        """
        # Convert the record to a LogEntry bound to the emitting bundle
        bundle = self._bundle_from_module(record.module)
        self._reader._store_entry(
            LogEntry(record.levelno, record.getMessage(), None, bundle, None)
        )
    def get_service(self, bundle, registration):
        # pylint: disable=W0613
        """
        Returns an instance of the log service for the given bundle

        :param bundle: Bundle consuming the service
        :param registration: Service registration bean
        :return: An instance of the logger
        """
        return LogServiceInstance(self._reader, bundle)
    @staticmethod
    def unget_service(bundle, registration):
        """
        Releases the service associated to the given bundle

        :param bundle: Consuming bundle
        :param registration: Service registration bean
        """
@BundleActivator
class Activator(object):
    """
    The bundle activator: registers the log reader and the log service
    factory on start, and cleans them up on stop.
    """
    def __init__(self):
        # Service registrations and the handler/factory instance
        self.__reader_reg = None
        self.__factory_reg = None
        self.__factory = None
    @staticmethod
    def get_level(context):
        """
        Get the log level from the bundle context (framework properties)

        :param context: A bundle context
        :return: A log level (int)
        """
        level_value = context.get_property(PROPERTY_LOG_LEVEL)
        if level_value:
            # Accept both numeric values and level names
            for converter in (int, logging.getLevelName):
                try:
                    parsed_level = converter(level_value)
                except (ValueError, TypeError):
                    continue
                if isinstance(parsed_level, int):
                    # Got a valid level
                    return parsed_level
        # By default, use the INFO level
        return logging.INFO
    def start(self, context):
        """
        Bundle starting

        :param context: The bundle context
        """
        # Normalize the maximum number of entries authorized
        raw_max = context.get_property(PROPERTY_LOG_MAX_ENTRIES)
        try:
            max_entries = int(raw_max)
        except (ValueError, TypeError):
            # Missing or invalid value: fall back to a default
            max_entries = 100
        # Register the LogReader service
        reader = LogReaderService(context, max_entries)
        self.__reader_reg = context.register_service(
            LOG_READER_SERVICE, reader, {}
        )
        # Register the LogService factory
        self.__factory = LogServiceFactory(
            context, reader, self.get_level(context)
        )
        self.__factory_reg = context.register_service(
            LOG_SERVICE, self.__factory, {}, factory=True
        )
        # Register the log service as a log handler...
        logging.getLogger().addHandler(self.__factory)
        # ...but not for this module's own logs
        logger.removeHandler(self.__factory)
    def stop(self, _):
        """
        Bundle stopping
        """
        # Unregister the services
        if self.__factory_reg is not None:
            self.__factory_reg.unregister()
            self.__factory_reg = None
        if self.__reader_reg is not None:
            self.__reader_reg.unregister()
            self.__reader_reg = None
        # Detach the handler from the root logger
        logging.getLogger().removeHandler(self.__factory)
        self.__factory = None
| {
"content_hash": "754246cf769dd9e29a6740553d467149",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 80,
"avg_line_length": 27.63883495145631,
"alnum_prop": 0.5613320219193481,
"repo_name": "tcalmant/ipopo",
"id": "fc8cb3d2406d848a3ad38baac9eaf31d0896750d",
"size": "14288",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "pelix/misc/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2183067"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class RelatedGeoModelTest(TestCase):
    """
    Tests for GeoDjango queries that traverse model relations:
    select_related, transform, aggregates (Extent/Union/Collect/Count),
    F() expressions, values()/values_list(), defer()/only() and queryset
    combination. Relies on the fixtures loaded by the surrounding app.
    """
    def test02_select_related(self):
        "Testing `select_related` on geographic models (see #7126)."
        qs1 = City.objects.all()
        qs2 = City.objects.select_related()
        qs3 = City.objects.select_related('location')
        # Reference data for what's in the fixtures.
        cities = (
            ('Aurora', 'TX', -97.516111, 33.058333),
            ('Roswell', 'NM', -104.528056, 33.387222),
            ('Kecksburg', 'PA', -79.460734, 40.18476),
        )
        for qs in (qs1, qs2, qs3):
            for ref, c in zip(cities, qs):
                nm, st, lon, lat = ref
                self.assertEqual(nm, c.name)
                self.assertEqual(st, c.state)
                self.assertEqual(Point(lon, lat), c.location.point)
    @no_mysql
    def test03_transform_related(self):
        "Testing the `transform` GeoQuerySet method on related geographic models."
        # All the transformations are to state plane coordinate systems using
        # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
        tol = 0
        def check_pnt(ref, pnt):
            self.assertAlmostEqual(ref.x, pnt.x, tol)
            self.assertAlmostEqual(ref.y, pnt.y, tol)
            self.assertEqual(ref.srid, pnt.srid)
        # Each city transformed to the SRID of their state plane coordinate system.
        transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
                       ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
                       ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
                       )
        for name, srid, wkt in transformed:
            # Doing this implicitly sets `select_related` select the location.
            # TODO: Fix why this breaks on Oracle.
            qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
            check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
    @no_mysql
    @no_spatialite
    def test04a_related_extent_aggregate(self):
        "Testing the `extent` GeoQuerySet aggregates on related geographic models."
        # Extent obtained through the aggregate API on the related field.
        aggs = City.objects.aggregate(Extent('location__point'))
        # One for all locations, one that excludes New Mexico (Roswell).
        all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
        txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
        e1 = City.objects.extent(field_name='location__point')
        e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
        e3 = aggs['location__point__extent']
        # The tolerance value is to four decimal places because of differences
        # between the Oracle and PostGIS spatial backends on the extent calculation.
        tol = 4
        for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
            for ref_val, e_val in zip(ref, e):
                self.assertAlmostEqual(ref_val, e_val, tol)
    @no_mysql
    def test04b_related_union_aggregate(self):
        "Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
        # Union obtained through the aggregate API on the related field.
        aggs = City.objects.aggregate(Union('location__point'))
        # These are the points that are components of the aggregate geographic
        # union that is returned. Each point # corresponds to City PK.
        p1 = Point(-104.528056, 33.387222)
        p2 = Point(-97.516111, 33.058333)
        p3 = Point(-79.460734, 40.18476)
        p4 = Point(-96.801611, 32.782057)
        p5 = Point(-95.363151, 29.763374)
        # Creating the reference union geometry depending on the spatial backend,
        # as Oracle will have a different internal ordering of the component
        # geometries than PostGIS. The second union aggregate is for a union
        # query that includes limiting information in the WHERE clause (in other
        # words a `.filter()` precedes the call to `.unionagg()`).
        if oracle:
            ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
            ref_u2 = MultiPoint(p3, p2, srid=4326)
        else:
            # Looks like PostGIS points by longitude value.
            ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
            ref_u2 = MultiPoint(p2, p3, srid=4326)
        u1 = City.objects.unionagg(field_name='location__point')
        u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
        u3 = aggs['location__point__union']
        self.assertEqual(ref_u1, u1)
        self.assertEqual(ref_u2, u2)
        self.assertEqual(ref_u1, u3)
    def test05_select_related_fk_to_subclass(self):
        "Testing that calling select_related on a query over a model with an FK to a model subclass works"
        # Regression test for #9752.
        list(DirectoryEntry.objects.all().select_related())
    def test06_f_expressions(self):
        "Testing F() expressions on GeometryFields."
        # Constructing a dummy parcel border and getting the City instance for
        # assigning the FK.
        b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
        pcity = City.objects.get(name='Aurora')
        # First parcel has incorrect center point that is equal to the City;
        # it also has a second border that is different from the first as a
        # 100ft buffer around the City.
        c1 = pcity.location.point
        c2 = c1.transform(2276, clone=True)
        b2 = c2.buffer(100)
        Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
        # Now creating a second Parcel where the borders are the same, just
        # in different coordinate systems. The center points are also the
        # same (but in different coordinate systems), and this time they
        # actually correspond to the centroid of the border.
        c1 = b1.centroid
        c2 = c1.transform(2276, clone=True)
        Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
        # Should return the second Parcel, which has the center within the
        # border.
        qs = Parcel.objects.filter(center1__within=F('border1'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P2', qs[0].name)
        if not mysql:
            # This time center2 is in a different coordinate system and needs
            # to be wrapped in transformation SQL.
            qs = Parcel.objects.filter(center2__within=F('border1'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P2', qs[0].name)
        # Should return the first Parcel, which has the center point equal
        # to the point in the City ForeignKey.
        qs = Parcel.objects.filter(center1=F('city__location__point'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P1', qs[0].name)
        if not mysql:
            # This time the city column should be wrapped in transformation SQL.
            qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P1', qs[0].name)
    def test07_values(self):
        "Testing values() and values_list() and GeoQuerySets."
        # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
        gqs = Location.objects.all()
        gvqs = Location.objects.values()
        gvlqs = Location.objects.values_list()
        # Incrementing through each of the models, dictionaries, and tuples
        # returned by the different types of GeoQuerySets.
        for m, d, t in zip(gqs, gvqs, gvlqs):
            # The values should be Geometry objects and not raw strings returned
            # by the spatial database.
            self.assertTrue(isinstance(d['point'], Geometry))
            self.assertTrue(isinstance(t[1], Geometry))
            self.assertEqual(m.point, d['point'])
            self.assertEqual(m.point, t[1])
    def test08_defer_only(self):
        "Testing defer() and only() on Geographic models."
        qs = Location.objects.all()
        def_qs = Location.objects.defer('point')
        for loc, def_loc in zip(qs, def_qs):
            self.assertEqual(loc.point, def_loc.point)
    def test09_pk_relations(self):
        "Ensuring correct primary key column is selected across relations. See #10757."
        # The expected ID values -- notice the last two location IDs
        # are out of order. Dallas and Houston have location IDs that differ
        # from their PKs -- this is done to ensure that the related location
        # ID column is selected instead of ID column for the city.
        city_ids = (1, 2, 3, 4, 5)
        loc_ids = (1, 2, 3, 5, 4)
        ids_qs = City.objects.order_by('id').values('id', 'location__id')
        for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
            self.assertEqual(val_dict['id'], c_id)
            self.assertEqual(val_dict['location__id'], l_id)
    def test10_combine(self):
        "Testing the combination of two GeoQuerySets. See #10807."
        buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
        buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
        qs1 = City.objects.filter(location__point__within=buf1)
        qs2 = City.objects.filter(location__point__within=buf2)
        combined = qs1 | qs2
        names = [c.name for c in combined]
        self.assertEqual(2, len(names))
        self.assertTrue('Aurora' in names)
        self.assertTrue('Kecksburg' in names)
    def test11_geoquery_pickle(self):
        "Ensuring GeoQuery objects are unpickled correctly. See #10839."
        import pickle
        from django.contrib.gis.db.models.sql import GeoQuery
        qs = City.objects.all()
        q_str = pickle.dumps(qs.query)
        q = pickle.loads(q_str)
        self.assertEqual(GeoQuery, q.__class__)
    # TODO: fix on Oracle -- get the following error because the SQL is ordered
    # by a geometry object, which Oracle apparently doesn't like:
    # ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
    @no_oracle
    def test12a_count(self):
        "Testing `Count` aggregate use with the `GeoManager` on geo-fields."
        # The City, 'Fort Worth' uses the same location as Dallas.
        dallas = City.objects.get(name='Dallas')
        # Count annotation should be 2 for the Dallas location now.
        loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
        self.assertEqual(2, loc.num_cities)
    def test12b_count(self):
        "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
        # Should only be one author (Trevor Paglen) returned by this query, and
        # the annotation should have 3 for the number of books, see #11087.
        # Also testing with a `GeoValuesQuerySet`, see #11489.
        qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
        vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
        self.assertEqual(1, len(qs))
        self.assertEqual(3, qs[0].num_books)
        self.assertEqual(1, len(vqs))
        self.assertEqual(3, vqs[0]['num_books'])
    def test13c_count(self):
        "Testing `Count` aggregate with `.values()`. See #15305."
        qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
        self.assertEqual(1, len(qs))
        self.assertEqual(2, qs[0]['num_cities'])
        self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
    # TODO: The phantom model does appear on Oracle.
    @no_oracle
    def test13_select_related_null_fk(self):
        "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
        Book.objects.create(title='Without Author')
        b = Book.objects.select_related('author').get(title='Without Author')
        # Should be `None`, and not a 'dummy' model.
        self.assertEqual(None, b.author)
    @no_mysql
    @no_oracle
    @no_spatialite
    def test14_collect(self):
        "Testing the `collect` GeoQuerySet method and `Collect` aggregate."
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
        # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
        # WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
        c1 = City.objects.filter(state='TX').collect(field_name='location__point')
        c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
        for coll in (c1, c2):
            # Even though Dallas and Ft. Worth share same point, Collect doesn't
            # consolidate -- that's why 4 points in MultiPoint.
            self.assertEqual(4, len(coll))
            self.assertEqual(ref_geom, coll)
    def test15_invalid_select_related(self):
        "Testing doing select_related on the related name manager of a unique FK. See #13934."
        qs = Article.objects.select_related('author__article')
        # This triggers TypeError when `get_default_columns` has no `local_only`
        # keyword. The TypeError is swallowed if QuerySet is actually
        # evaluated as list generation swallows TypeError in CPython.
        str(qs.query)
    def test16_annotated_date_queryset(self):
        "Ensure annotated date querysets work if spatial backend is used. See #14648."
        birth_years = [dt.year for dt in
                       list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
        birth_years.sort()
        self.assertEqual([1950, 1974], birth_years)
    # TODO: Related tests for KML, GML, and distance lookups.
| {
"content_hash": "8e7ed9f610f6faaff85ce91b170aa837",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 155,
"avg_line_length": 48.504885993485345,
"alnum_prop": 0.6387750990531194,
"repo_name": "ericholscher/django",
"id": "b165699e22143e92196a51f4b5539274d6f6c4f6",
"size": "14891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/tests/relatedapp/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51177"
},
{
"name": "JavaScript",
"bytes": "102377"
},
{
"name": "Python",
"bytes": "9011891"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from flask import Flask, request
from flask.views import MethodView
from flask.ext.introspect import blueprint as rest, TreeView, ObjectViewMixin, DictViewMixin
from unittest import TestCase
class Config():
    """Configuration object for the Flask application under test."""
    TESTING = True
    DEBUG = True
    SERVER_NAME = 'localhost'
class O(object):
    """Simple attribute bag: keyword arguments become instance attributes."""
    def __init__(self, **attributes):
        # Copy every keyword argument onto the instance
        for name, value in attributes.items():
            setattr(self, name, value)
class OView(ObjectViewMixin, TreeView):
    '''
    Tree view bound to ``O`` objects.

    view class methods will always receive a suitable item
    and can encapsulate web related functionality (including permissions)
    '''
    # NOTE: the explanatory string above was previously placed *after*
    # __type__, making it a no-op bare-string statement instead of the
    # class docstring; it is now a real docstring (and typos are fixed).
    __type__ = O
    class view(MethodView):
        def get(self, item):
            # Expose the wrapped object's ``a`` attribute
            return item.obj.a
class DictView(DictViewMixin, OView):
    """Tree view for dict items, reusing OView's type binding."""
    class view(MethodView):
        def get(self, item):
            # Return the value stored under the fixed key 'c'
            return item.obj['c']
        def post(self, item):
            # Concatenate the 'a' and 'b' request parameters
            return request.values['a'] + request.values['b']
class TestRESTBlueprint(TestCase):
    """End-to-end tests of the introspection blueprint over a test client."""
    def setUp(self):
        # Fresh Flask app + test client for every test
        app = Flask(__name__)
        app.config.from_object(Config)
        self.app = app.test_client()
    def test_get(self):
        '''
        another way to limit access is to arrange setup_tree function that
        will determine tree parameters (roots and leafs) according to user permissions
        '''
        api = rest('api', __name__, {'o': O(a='a name', b={'c':'d'})}, roots=OView)
        self.app.application.register_blueprint(api, url_prefix='/api')
        # /api/o hits OView.view.get, /api/o/b descends into the dict
        self.assertTrue('\n'.join(self.app.get('/api/o').response) == 'a name')
        self.assertTrue('\n'.join(self.app.get('/api/o/b').response) == 'd')
    def test_post(self):
        api = rest('api', __name__, {'o': O(a='a name', b={'c':'d'})}, roots=OView)
        self.app.application.register_blueprint(api, url_prefix='/api')
        # DictView.view.post concatenates the 'a' and 'b' form values
        self.assertTrue('\n'.join(self.app.post('/api/o/b', data={'a':1, 'b':2}).response) == '12')
if __name__ == '__main__':
    # Run the tests with the standard unittest runner when executed directly
    from unittest import main
main() | {
"content_hash": "5471e3fb0717d4eadef3d35acbe91002",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 99,
"avg_line_length": 33.01724137931034,
"alnum_prop": 0.6135770234986945,
"repo_name": "denz/flask_introspect",
"id": "f4f91537c4158b103b18ffda45ed8e885a6ad033",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_blueprint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41933"
}
],
"symlink_target": ""
} |
import re
class BaseTokenizer(object):
    """
    Really simple tokenizer.

    Lower-cases the text, splits it on dots/commas followed by a letter or
    whitespace (so numbers like ``3.5`` stay intact), then on spaces, and
    strips non-word characters from each token.
    """
    @staticmethod
    def tokenize(text):
        """
        Splits text into a list of words removing any symbol and converts it into lowercase
        """
        tokens = []
        text = text.lower()
        for dot_item in BaseTokenizer.regex_split(r'\.(?=[a-zA-Z\s])', text):
            for comma_item in BaseTokenizer.regex_split(r',(?=[a-zA-Z\s])', dot_item):
                for item in comma_item.split(" "):
                    item = BaseTokenizer.tokenize_item(item)
                    if item:
                        tokens.append(item)
        return tokens
    @staticmethod
    def regex_split(regex, text):
        """
        Yields the pieces of ``text`` split on every match of ``regex``.

        BUG FIX: ``re.split`` was called with ``re.I`` as the third
        *positional* argument, which is ``maxsplit`` (``re.I == 2``), so
        only the first two separators were ever split. The flag is now
        passed via the ``flags`` keyword.
        """
        for item in re.split(regex, text, flags=re.I):
            yield item
    @staticmethod
    def tokenize_item(item):
        """
        If it is an int/float it returns the item (there's no need to remove , or .)
        Otherwise all non-word characters are removed (underscores are kept).
        """
        item = item.strip()
        try:
            float(item)
            return item
        except ValueError:
            pass
        # This will keep underscores
        return re.sub(r'[^\w]', '', item)
| {
"content_hash": "4b9858680dda8548183198c6e540d949",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 91,
"avg_line_length": 28.825,
"alnum_prop": 0.5238508239375542,
"repo_name": "umitproject/tease-o-matic",
"id": "9a201356a478baf77409241bce3d93240ea97384",
"size": "1153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_mongodb_engine/contrib/search/tokenizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "107814"
},
{
"name": "Python",
"bytes": "6962452"
},
{
"name": "Ruby",
"bytes": "1987"
}
],
"symlink_target": ""
} |
from direct.showbase.PythonUtil import invertDict
from toontown.toonbase import ToontownGlobals
from toontown.coghq import NullCogs
from toontown.coghq import CashbotMintBoilerRoom_Battle00_Cogs
from toontown.coghq import CashbotMintBoilerRoom_Battle01_Cogs
from toontown.coghq import CashbotMintControlRoom_Battle00_Cogs
from toontown.coghq import CashbotMintDuctRoom_Battle00_Cogs
from toontown.coghq import CashbotMintDuctRoom_Battle01_Cogs
from toontown.coghq import CashbotMintGearRoom_Battle00_Cogs
from toontown.coghq import CashbotMintGearRoom_Battle01_Cogs
from toontown.coghq import CashbotMintLavaRoomFoyer_Battle00_Cogs
from toontown.coghq import CashbotMintLavaRoomFoyer_Battle01_Cogs
from toontown.coghq import CashbotMintLobby_Battle00_Cogs
from toontown.coghq import CashbotMintLobby_Battle01_Cogs
from toontown.coghq import CashbotMintOilRoom_Battle00_Cogs
from toontown.coghq import CashbotMintPaintMixerReward_Battle00_Cogs
from toontown.coghq import CashbotMintPipeRoom_Battle00_Cogs
from toontown.coghq import CashbotMintPipeRoom_Battle01_Cogs
# Explicit imports for the below room modules:
from toontown.coghq import CashbotMintEntrance_Action00
from toontown.coghq import CashbotMintBoilerRoom_Action00
from toontown.coghq import CashbotMintBoilerRoom_Battle00
from toontown.coghq import CashbotMintDuctRoom_Action00
from toontown.coghq import CashbotMintDuctRoom_Battle00
from toontown.coghq import CashbotMintGearRoom_Action00
from toontown.coghq import CashbotMintGearRoom_Battle00
from toontown.coghq import CashbotMintLavaRoomFoyer_Action00
from toontown.coghq import CashbotMintLavaRoomFoyer_Action01
from toontown.coghq import CashbotMintLavaRoomFoyer_Battle00
from toontown.coghq import CashbotMintLavaRoom_Action00
from toontown.coghq import CashbotMintLobby_Action00
from toontown.coghq import CashbotMintLobby_Battle00
from toontown.coghq import CashbotMintPaintMixer_Action00
from toontown.coghq import CashbotMintPipeRoom_Action00
from toontown.coghq import CashbotMintPipeRoom_Battle00
from toontown.coghq import CashbotMintStomperAlley_Action00
from toontown.coghq import CashbotMintBoilerRoom_Battle01
from toontown.coghq import CashbotMintControlRoom_Battle00
from toontown.coghq import CashbotMintDuctRoom_Battle01
from toontown.coghq import CashbotMintGearRoom_Battle01
from toontown.coghq import CashbotMintLavaRoomFoyer_Battle01
from toontown.coghq import CashbotMintOilRoom_Battle00
from toontown.coghq import CashbotMintLobby_Battle01
from toontown.coghq import CashbotMintPaintMixerReward_Battle00
from toontown.coghq import CashbotMintPipeRoom_Battle01
def getMintRoomSpecModule(roomId):
    """Return the room-spec module registered for the given room id."""
    return CashbotMintSpecModules[roomId]
def getCogSpecModule(roomId):
    """
    Return the cog-spec module for a room id, falling back to NullCogs
    for rooms that have no cog battles.
    """
    return CogSpecModules.get(CashbotMintRoomId2RoomName[roomId], NullCogs)
def getNumBattles(roomId):
    """Return the number of battles configured for the given room id."""
    return roomId2numBattles[roomId]
# Numeric room id -> room spec module name (also the module's global name)
CashbotMintRoomId2RoomName = {0: 'CashbotMintEntrance_Action00',
 1: 'CashbotMintBoilerRoom_Action00',
 2: 'CashbotMintBoilerRoom_Battle00',
 3: 'CashbotMintDuctRoom_Action00',
 4: 'CashbotMintDuctRoom_Battle00',
 5: 'CashbotMintGearRoom_Action00',
 6: 'CashbotMintGearRoom_Battle00',
 7: 'CashbotMintLavaRoomFoyer_Action00',
 8: 'CashbotMintLavaRoomFoyer_Action01',
 9: 'CashbotMintLavaRoomFoyer_Battle00',
 10: 'CashbotMintLavaRoom_Action00',
 11: 'CashbotMintLobby_Action00',
 12: 'CashbotMintLobby_Battle00',
 13: 'CashbotMintPaintMixer_Action00',
 14: 'CashbotMintPipeRoom_Action00',
 15: 'CashbotMintPipeRoom_Battle00',
 16: 'CashbotMintStomperAlley_Action00',
 17: 'CashbotMintBoilerRoom_Battle01',
 18: 'CashbotMintControlRoom_Battle00',
 19: 'CashbotMintDuctRoom_Battle01',
 20: 'CashbotMintGearRoom_Battle01',
 21: 'CashbotMintLavaRoomFoyer_Battle01',
 22: 'CashbotMintOilRoom_Battle00',
 23: 'CashbotMintLobby_Battle01',
 24: 'CashbotMintPaintMixerReward_Battle00',
 25: 'CashbotMintPipeRoom_Battle01'}
# Reverse mapping: room name -> room id
CashbotMintRoomName2RoomId = invertDict(CashbotMintRoomId2RoomName)
# Room id groups: entrance, middle rooms, and final (reward-side) rooms
CashbotMintEntranceIDs = (0,)
CashbotMintMiddleRoomIDs = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
CashbotMintFinalRoomIDs = (17, 18, 19, 20, 21, 22, 23, 24, 25)
# Model paths for the hallway pieces placed between rooms
CashbotMintConnectorRooms = ('phase_10/models/cashbotHQ/connector_7cubeL2', 'phase_10/models/cashbotHQ/connector_7cubeR2')
# Room id -> imported room spec module; locals() works here because each
# room module was imported above under its own name at module scope
CashbotMintSpecModules = {}
for roomName, roomId in CashbotMintRoomName2RoomId.items():
    CashbotMintSpecModules[roomId] = locals()[roomName]
# Room name -> cog spec module (only battle rooms have one)
CogSpecModules = {'CashbotMintBoilerRoom_Battle00': CashbotMintBoilerRoom_Battle00_Cogs,
 'CashbotMintBoilerRoom_Battle01': CashbotMintBoilerRoom_Battle01_Cogs,
 'CashbotMintControlRoom_Battle00': CashbotMintControlRoom_Battle00_Cogs,
 'CashbotMintDuctRoom_Battle00': CashbotMintDuctRoom_Battle00_Cogs,
 'CashbotMintDuctRoom_Battle01': CashbotMintDuctRoom_Battle01_Cogs,
 'CashbotMintGearRoom_Battle00': CashbotMintGearRoom_Battle00_Cogs,
 'CashbotMintGearRoom_Battle01': CashbotMintGearRoom_Battle01_Cogs,
 'CashbotMintLavaRoomFoyer_Battle00': CashbotMintLavaRoomFoyer_Battle00_Cogs,
 'CashbotMintLavaRoomFoyer_Battle01': CashbotMintLavaRoomFoyer_Battle01_Cogs,
 'CashbotMintLobby_Battle00': CashbotMintLobby_Battle00_Cogs,
 'CashbotMintLobby_Battle01': CashbotMintLobby_Battle01_Cogs,
 'CashbotMintOilRoom_Battle00': CashbotMintOilRoom_Battle00_Cogs,
 'CashbotMintPaintMixerReward_Battle00': CashbotMintPaintMixerReward_Battle00_Cogs,
 'CashbotMintPipeRoom_Battle00': CashbotMintPipeRoom_Battle00_Cogs,
 'CashbotMintPipeRoom_Battle01': CashbotMintPipeRoom_Battle01_Cogs}
# Room id -> battle count, derived from each cog module's battle cells
roomId2numBattles = {}
for roomName, roomId in CashbotMintRoomName2RoomId.items():
    if roomName not in CogSpecModules:
        roomId2numBattles[roomId] = 0
    else:
        cogSpecModule = CogSpecModules[roomName]
        roomId2numBattles[roomId] = len(cogSpecModule.BattleCells)
# Hand-tuned overrides of the cell-derived battle counts for two rooms
name2id = CashbotMintRoomName2RoomId
roomId2numBattles[name2id['CashbotMintBoilerRoom_Battle00']] = 3
roomId2numBattles[name2id['CashbotMintPipeRoom_Battle00']] = 2
del name2id
# Battle counts restricted to the middle rooms
middleRoomId2numBattles = {}
for roomId in CashbotMintMiddleRoomIDs:
    middleRoomId2numBattles[roomId] = roomId2numBattles[roomId]
| {
"content_hash": "fe66e158db364737ee26d494252941ca",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 122,
"avg_line_length": 48.608,
"alnum_prop": 0.8413429888084266,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "c7bebf8637e68fac6079784350e69be6cd09633f",
"size": "6076",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/coghq/MintRoomSpecs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
# Parenthesized single-argument print works identically on Python 2 and 3;
# the bare `print "..."` statement form is a SyntaxError on Python 3.
print("I could have code like this.")  # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code:
# print("this won't run.")
print("This will run.")
| {
"content_hash": "f0e169ba1b3ce9041f7f36588998c905",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.7106598984771574,
"repo_name": "alfredleo/learnPythonTheHardWay",
"id": "69c9e7abea41a7d0f716f041c6e532bfb7d0dd22",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11469"
}
],
"symlink_target": ""
} |
import csv
import re
def preProcess(column):
    """
    Normalize a raw CSV field for comparison.

    Collapses runs of spaces, turns newlines into single spaces, trims
    surrounding whitespace and quote characters, and lower-cases the
    result. Fields that end up empty are returned as None.
    """
    # Collapse space runs first, then replace newlines with single spaces
    collapsed = re.sub(' +', ' ', column)
    flattened = re.sub('\n', ' ', collapsed)
    # Trim whitespace, then surrounding double and single quotes
    normalized = flattened.strip().strip('"').strip("'").lower()
    # Represent empty fields as None
    return normalized or None
def print_csv(input_file, output_file, header, clustered_dupes):
    """
    Write a copy of ``input_file`` to ``output_file`` with a leading
    "Group_ID" column identifying duplicate clusters.

    Rows listed in ``clustered_dupes`` are written first, grouped and
    tagged with their 1-based cluster number; all remaining rows follow,
    tagged with 'x'.

    Fixes over the previous version: uses the builtin ``next()`` instead
    of the Python-2-only ``reader.next()``, no longer mutates the caller's
    ``header`` list (which double-inserted "Group_ID" on repeated calls),
    and builds the duplicate-id set once instead of per row.

    :param input_file: Path of the source CSV (first row is a header)
    :param output_file: Path of the CSV file to create
    :param header: Column names for the output header row (not mutated)
    :param clustered_dupes: Iterable of clusters, each a collection of
        0-based row indices into the source data rows
    """
    # Read every data row, keyed by its 0-based position after the header
    orig_data = {}
    with open(input_file) as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row_id, row in enumerate(reader):
            orig_data[row_id] = row
    with open(output_file, "w") as f:
        writer = csv.writer(f)
        # Prepend the cluster-id column without mutating the caller's list
        writer.writerow(["Group_ID"] + list(header))
        # Clustered rows first, tagged with their 1-based group number
        dupe_ids = set()
        for group_id, cluster in enumerate(clustered_dupes, 1):
            for candidate in sorted(cluster):
                dupe_ids.add(candidate)
                writer.writerow([group_id] + orig_data[candidate])
        # Remaining (non-duplicate) rows, tagged with 'x'
        for row_id, row in orig_data.items():
            if row_id not in dupe_ids:
                writer.writerow(['x'] + row)
| {
"content_hash": "10af6a76a18b03e17762bf03fa91aa5b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 89,
"avg_line_length": 29.785714285714285,
"alnum_prop": 0.539568345323741,
"repo_name": "datamade/dedupe",
"id": "d6ed956fb1a92f84d776ca11bbd73319791c95ac",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/exampleIO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190850"
},
{
"name": "Shell",
"bytes": "1224"
}
],
"symlink_target": ""
} |
"""
"""
# Native
import random
import time
import curses
from curses import wrapper
import argparse
# # 3rd-Party
# # Proprietary
from monitors.building_monitor import BuildingMonitor
def main(screen):
    """Curses entry point: parse CLI options and run the building monitor.

    Args:
        screen: curses window object supplied by ``curses.wrapper``.
    """
    parser = argparse.ArgumentParser(description='Building Monitor')
    # type=float so a value given on the command line matches the float
    # default instead of being passed to BuildingMonitor as a string.
    parser.add_argument('--interval', action='store', type=float, default=1.0)
    args = parser.parse_args()

    monitor = BuildingMonitor(screen, interval=args.interval)
    monitor.run()
# curses.wrapper initializes the terminal, calls main(screen), and restores
# the terminal state even if main raises.
wrapper(main)
| {
"content_hash": "b0303ac524391ca4dfbea20012a75f80",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 15.35483870967742,
"alnum_prop": 0.7226890756302521,
"repo_name": "m3talstorm/foe-bot",
"id": "7ed1b80bbfcbf4062c54225ff866c2068e274470",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foe/monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38192"
}
],
"symlink_target": ""
} |
'''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
import logging
import os
import re
import sys
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
# ---- Plugin metadata consumed by the mac_apt framework ----
__Plugin_Name = "NETWORKING"
__Plugin_Friendly_Name = "Networking"
__Plugin_Version = "1.0"
__Plugin_Description = 'Gets network related information - Interfaces, last IP addresses, MAC address, etc..'
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "yogesh@swiftforensics.com"
__Plugin_Modes = "MACOS,IOS"
__Plugin_ArtifactOnly_Usage = ''

log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object

#---- Do not change the variable names in above section ----#

PYTHON_VER = sys.version_info.major

# Module-level accumulators, filled by the Get* functions below and written
# out via WriteList() in Plugin_Start / Plugin_Start_Ios.
dhcp_interfaces = []
# Column layout (name, DataType) for the DHCP lease output table.
dhcp_data_info = [ ('Interface',DataType.TEXT),('MAC_Address',DataType.TEXT),('IPAddress',DataType.TEXT),
                   ('LeaseLength',DataType.INTEGER),('LeaseStartDate',DataType.DATE),('PacketData',DataType.BLOB),
                   ('RouterHardwareAddress',DataType.TEXT),('RouterIPAddress',DataType.TEXT),
                   ('SSID', DataType.TEXT),('Source', DataType.TEXT)
                 ]

resolv_conf = []  # NOTE(review): shadowed by a local in GetResolvConf(); never populated in visible code
etc_hosts = []    # NOTE(review): shadowed by a local in GetEtcHosts(); never populated in visible code

net_interfaces = []
# Column layout for entries parsed from NetworkInterfaces.plist.
net_interface_info = [ ('Category',DataType.TEXT),('Active',DataType.TEXT),('BSD Name',DataType.TEXT),
                       ('IOBuiltin',DataType.TEXT),('IOInterfaceNamePrefix', DataType.TEXT),('IOInterfaceType',DataType.INTEGER),
                       ('IOInterfaceUnit', DataType.INTEGER),('IOMACAddress',DataType.TEXT),('IOPathMatch',DataType.TEXT),
                       ('SCNetworkInterfaceInfo',DataType.TEXT),('SCNetworkInterfaceType',DataType.TEXT),('Source', DataType.TEXT)
                     ]

net_interface_details = []
# Column layout for entries parsed from preferences.plist (NetworkServices).
net_interface_detail_info = [ ('UUID',DataType.TEXT),('IPv4.ConfigMethod',DataType.TEXT),('IPv6.ConfigMethod',DataType.TEXT),
                              ('DeviceName',DataType.TEXT),('Hardware',DataType.TEXT),('Type',DataType.TEXT),
                              ('SubType',DataType.TEXT),('UserDefinedName',DataType.TEXT),('Proxies.ExceptionsList',DataType.TEXT),
                              ('SMB.NetBIOSName',DataType.TEXT),('SMB.Workgroup',DataType.TEXT),
                              ('PPP',DataType.TEXT),('Modem',DataType.TEXT),('Source', DataType.TEXT) #,('VirtualInterfaces',DataType.TEXT)
                            ]
def GetNetworkInterface2Info(mac_info, preference_plist_path):
    '''Read interface info from /Library/Preferences/SystemConfiguration/preferences.plist'''
    # Exports the plist to the output folder, then parses every entry under
    # /NetworkServices into a dict appended to net_interface_details.
    #preference_plist_path = '/Library/Preferences/SystemConfiguration/preferences.plist'
    mac_info.ExportFile(preference_plist_path, __Plugin_Name, '', False)
    success, plist, error_message = mac_info.ReadPlist(preference_plist_path)
    if success:
        try:
            for uuid, interface in list(plist['NetworkServices'].items()):
                interface_info = { 'UUID': uuid, 'Source': preference_plist_path }
                for item, value in list(interface.items()):
                    # DNS data is only logged, not stored in the output row.
                    if item == 'DNS' and value: log.info('Interface {} has DNS info as : {}'.format(uuid, value))
                    elif item == 'UserDefinedName' or item == 'Modem' or item == 'PPP': interface_info[item] = str(value)
                    elif item == 'Proxies':
                        try:
                            exceptions = value['ExceptionsList']
                            interface_info['Proxies.ExceptionsList'] = ",".join(exceptions)
                        except (KeyError, ValueError):
                            log.debug('/NetworkServices/' + uuid + '/Proxies/ExceptionsList not found in plist ' + preference_plist_path)
                    elif item == 'IPv4':
                        try:
                            method = value['ConfigMethod']
                            interface_info['IPv4.ConfigMethod'] = method
                        except KeyError: log.error('/NetworkServices/' + uuid + '/IPv4/ConfigMethod not found in plist ' + preference_plist_path)
                    elif item == 'IPv6':
                        try:
                            method = value['ConfigMethod']
                            interface_info['IPv6.ConfigMethod'] = method
                        except KeyError: log.error('/NetworkServices/' + uuid + '/IPv6/ConfigMethod not found in plist ' + preference_plist_path)
                    elif item == 'Interface':
                        # Only the known sub-keys are copied; anything else is logged.
                        for k, v in list(value.items()):
                            if k in ['DeviceName', 'Hardware', 'Type', 'UserDefinedName']: interface_info[k] = v
                            else:
                                log.info('Found unknown data in plist at /NetworkServices/' + uuid + '/Interface/' + k + ' Value=' + str(v))
                    elif item == 'SMB':
                        for k, v in list(value.items()):
                            if k in ['NetBIOSName', 'Workgroup', 'Type', 'UserDefinedName']: interface_info['SMB.'+ k] = v
                            else:
                                log.info('Found unknown data in plist at /NetworkServices/' + uuid + '/SMB/' + k + ' Value=' + v)
                net_interface_details.append(interface_info)
            # Disabled code kept for reference: would flag bridge members as virtual.
            '''try:
                for item, bridge in plist['VirtualNetworkInterfaces']['Bridge'].items():
                    try:
                        for if_name in bridge['Interfaces']:
                            for interface in net_interface_details:
                                if if_name == interface['DeviceName']:
                                    interface['IsVirtualInterface'] = True
                                    break
                    except:
                        log.debug('/VirtualNetworkInterfaces/Bridge/' + bridge + '/Interfaces not found!')
            except Exception:
                log.debug('/VirtualNetworkInterfaces/Bridge not found!')'''
        except (KeyError, ValueError):
            log.exception('/NetworkServices not found or other error from ' + preference_plist_path)
    else:
        log.error('Failed to read plist ' + preference_plist_path + " Error was : " + error_message)
def GetNetworkInterfaceInfo(mac_info, path):
    '''Read interface info from NetworkInterfaces.plist'''
    # Appends one dict per interface entry to the module-level net_interfaces.
    #path = '/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist'
    mac_info.ExportFile(path, __Plugin_Name, '', False)
    log.debug("Trying to read {}".format(path))
    success, plist, error = mac_info.ReadPlist(path)
    if success:
        model = plist.get('Model', '')
        if model:
            log.info("Model = " + model)
        for category, cat_array in plist.items(): #value is another array in this dict
            # Only top-level keys named 'Interface*' hold interface arrays.
            if not category.startswith('Interface'):
                if category != 'Model': log.debug('Skipping ' + category)
                continue
            for interface in cat_array:
                interface_info = {'Category':category, 'Source':path }
                for item, value in interface.items():
                    if item in ['Active','BSD Name','IOBuiltin','IOInterfaceNamePrefix','IOInterfaceType',
                                'IOInterfaceUnit','IOPathMatch','SCNetworkInterfaceType']:
                        interface_info[item] = value
                    elif item == 'IOMACAddress': # convert binary blob to MAC address
                        data = ':'.join(value.hex()[i:i + 2] for i in range(0, len(value.hex()), 2))
                        interface_info[item] = data.upper()
                    elif item == 'SCNetworkInterfaceInfo':
                        interface_info['SCNetworkInterfaceInfo'] = value.get('UserDefinedName', '')
                    else:
                        log.info("Found unknown item in plist: ITEM=" + item + " VALUE=" + str(value))
                net_interfaces.append(interface_info)
    else:
        log.error("Could not open plist to get interface info for " + path + " Error was " + error)
def ParseDhcpFromPlist(plist, interface_info):
    """Copy DHCP lease fields from a parsed plist dict into interface_info.

    Binary MAC-address blobs are rendered as upper-case, colon-separated hex.
    Unrecognized keys are logged and otherwise ignored.
    """
    direct_fields = ('IPAddress', 'LeaseLength', 'LeaseStartDate',
                     'PacketData', 'RouterIPAddress', 'SSID')
    for key, val in plist.items():
        if key in direct_fields:
            interface_info[key] = val
        elif key == 'RouterHardwareAddress':
            # Binary blob -> "AA:BB:CC:DD:EE:FF"
            interface_info[key] = ':'.join('%02x' % b for b in val).upper()
        elif key == 'ClientIdentifier':
            # ios 14 - first byte is a type prefix, the remainder is the MAC
            interface_info['MAC_Address'] = ':'.join('%02x' % b for b in val[1:]).upper()
        else:
            log.info("Found unknown item in plist: ITEM=" + key + " VALUE=" + str(val))
def GetDhcpInfo(mac_info):
    '''Read dhcp leases & interface entries'''
    # Lease files are named either "<if>,<mac>.plist"-style ("en0,0-1-2-..")
    # or "<name>.plist" (iOS 14); anything else is logged and skipped.
    try:
        interfaces = mac_info.ListItemsInFolder('/private/var/db/dhcpclient/leases', EntryType.FILES)
        for interface in interfaces:
            name = interface['name']
            has_plist_ext = False
            if name.endswith('.plist'): # ios14
                has_plist_ext = True
            if name.find(",") > 0 or has_plist_ext:
                #Process plist
                mac_info.ExportFile('/private/var/db/dhcpclient/leases/' + name, __Plugin_Name, '', False)
                name_no_ext = os.path.splitext(name)[0]
                if_name = name_no_ext # ios 14
                mac_address = ''
                if name.find(",") > 0:
                    # Filename encodes "<interface>,<mac>"; normalize the MAC
                    # separators to colons.
                    if_name, mac_address = name_no_ext.split(",")
                    mac_address = re.sub("[^0-9a-fA-F]+", ":", mac_address)
                    log.info("Found mac address = " + mac_address + " on interface " + if_name)
                log.debug("Trying to read {}".format(name))
                path = '/private/var/db/dhcpclient/leases/' + name
                success, plist, error = mac_info.ReadPlist(path)
                if success:
                    interface_info = { 'Source':path,
                                       'Interface':if_name,
                                       'MAC_Address':mac_address }
                    ParseDhcpFromPlist(plist, interface_info)
                    dhcp_interfaces.append(interface_info)
                else:
                    log.error("Could not open plist to get interface info for " + path + " Error was " + error)
            else:
                log.info("Found unexpected file, not processing /private/var/db/dhcpclient/leases/" + name + " size=" + str(interface['size']))
        # Done processing interfaces!
    except (ValueError, IndexError) as ex:
        log.error("Could not list files for folder /private/var/db/dhcpclient/leases")
        log.exception("Exception from GetDhcpInterfaces()")
def GetFileContents(mac_info, path):
    """Return the non-comment lines of a text file as decoded strings.

    Lines beginning with b'#' are skipped and trailing whitespace is
    stripped. Returns an empty list when the file cannot be opened or read.
    """
    log.debug("Trying to read {}".format(path))
    contents = []
    handle = mac_info.Open(path)
    if handle == None:
        log.error("Could not open file " + path)
        return contents
    try:
        for raw_line in handle:
            if raw_line.startswith(b'#'):
                continue
            stripped = raw_line.rstrip(b' \t\n\r')
            contents.append(stripped.decode('utf-8', 'backslashreplace'))
    except ValueError as ex:
        log.error("Unknown error while reading file " + path + " : " + str(ex))
    return contents
def GetResolvConf(mac_info):
    '''Reads last domain and nameserver data from resolv.conf'''
    resolv_conf_path = '/private/var/run/resolv.conf'
    if mac_info.IsValidFilePath(resolv_conf_path):
        # NOTE(review): this local `resolv_conf` shadows the module-level list
        # of the same name; the contents are only logged, never stored.
        resolv_conf = GetFileContents(mac_info, resolv_conf_path)
        mac_info.ExportFile(resolv_conf_path, __Plugin_Name, '', False)
        for line in resolv_conf:
            log.info("resolve.conf Content --> " + line)
    else:
        log.info("{} does not exist!".format(resolv_conf_path))
def GetEtcHosts(mac_info):
    '''Reads hosts file'''
    etc_hosts_path = '/private/etc/hosts'
    if mac_info.IsValidFilePath(etc_hosts_path):
        # NOTE(review): this local `etc_hosts` shadows the module-level list
        # of the same name; the contents are only logged, never stored.
        etc_hosts = GetFileContents(mac_info, etc_hosts_path)
        mac_info.ExportFile(etc_hosts_path, __Plugin_Name, '', False)
        for line in etc_hosts:
            log.info("/etc/hosts Content --> " + line)
    else:
        log.info("{} does not exist!".format(etc_hosts_path))
def Plugin_Start(mac_info):
    '''Main Entry point function for plugin'''
    # NOTE(review): `dchp_duid_plist` (sic) is currently unused; see TODO.
    dchp_duid_plist = '/private/var/db/dhcpclient/DUID_IA.plist'
    # TODO: Read duid plist and display..
    #
    GetDhcpInfo(mac_info)
    GetResolvConf(mac_info) # Not writing to file yet!
    GetEtcHosts(mac_info) # Not writing to file yet!
    GetNetworkInterfaceInfo(mac_info, '/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist')
    GetNetworkInterface2Info(mac_info, '/Library/Preferences/SystemConfiguration/preferences.plist')

    # Emit the accumulated rows using the column specs defined at module level.
    WriteList('dhcp data', 'Network_DHCP', dhcp_interfaces, dhcp_data_info, mac_info.output_params)
    WriteList('network interface data', 'Network_Interfaces', net_interfaces, net_interface_info, mac_info.output_params, '/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist')
    WriteList('network interface details', 'Network_Details', net_interface_details, net_interface_detail_info, mac_info.output_params, '/Library/Preferences/SystemConfiguration/preferences.plist')
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Entry point for artifact-only mode; this plugin does not support it.'''
    log.info("This plugin cannot be run as standalone")
def Plugin_Start_Ios(ios_info):
    '''Entry point for ios_apt plugin'''
    # Same flow as Plugin_Start, but plists live under /private/var on iOS.
    GetDhcpInfo(ios_info)
    GetResolvConf(ios_info) # Not writing to file yet!
    GetEtcHosts(ios_info) # Not writing to file yet!
    GetNetworkInterfaceInfo(ios_info, '/private/var/Preferences/SystemConfiguration/NetworkInterfaces.plist')
    GetNetworkInterface2Info(ios_info, '/private/var/Preferences/SystemConfiguration/preferences.plist')

    WriteList('dhcp data', 'Network_DHCP', dhcp_interfaces, dhcp_data_info, ios_info.output_params)
    WriteList('network interface data', 'Network_Interfaces', net_interfaces, net_interface_info, ios_info.output_params, '/Library/Preferences/SystemConfiguration/NetworkInterfaces.plist')
    WriteList('network interface details', 'Network_Details', net_interface_details, net_interface_detail_info, ios_info.output_params, '/Library/Preferences/SystemConfiguration/preferences.plist')
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | {
"content_hash": "1aa4df1a51e2c8e243f41ce36f66d8a0",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 197,
"avg_line_length": 55.83018867924528,
"alnum_prop": 0.5858736059479553,
"repo_name": "ydkhatri/mac_apt",
"id": "5540359aaec04b9b282100b74dd0c5106d382d08",
"size": "14795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/networking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Kaitai Struct",
"bytes": "19375"
},
{
"name": "Python",
"bytes": "1315164"
}
],
"symlink_target": ""
} |
import sys
import os
# Python 2 script: parse a libtorrent DHT log and render announce-distance
# and node-uptime histograms with gnuplot.

# Histogram bucket width for node uptime, in seconds.
up_time_quanta = 60

# The log file to parse is the first command-line argument.
f = open(sys.argv[1])

announce_histogram = {}
node_uptime_histogram = {}

# Pre-seed the buckets so every bucket appears in the output, even if empty.
for i in xrange(0, 50): announce_histogram[i] = 0
for i in xrange(0, 5000, up_time_quanta): node_uptime_histogram[i] = 0

counter = 0;

for line in f:
	counter += 1
	# Progress indicator every 1000 lines (carriage return keeps one line).
	if counter % 1000 == 0:
		print '\r%d' % counter,
	try:
		if 'distance:' in line:
			# NOTE(review): assumes the distance is the 5th space-separated
			# token of the log line - confirm against the log format.
			l = line.split(' ')
			d = int(l[4])
			announce_histogram[d] += 1
		if 'NODE FAILED' in line:
			l = line.split(' ')
			# NOTE(review): only lines whose 10th token equals 1 are counted;
			# the uptime is then taken from the 12th token - confirm.
			if int(l[9]) != 1: continue;
			d = int(l[11])
			node_uptime_histogram[d - (d % up_time_quanta)] += 1
	except:
		# Best-effort parse: dump the tokens of a malformed line and go on.
		print line.split(' ')

# Write the announce-distance histogram as "bucket count" pairs.
out = open('dht_announce_distribution.dat', 'w+')
for k,v in announce_histogram.items():
	print >>out, '%d %d' % (k, v)
out.close()

# Write the uptime histogram; the x value is the center of each bucket.
out = open('dht_node_uptime_distribution.dat', 'w+')
for k,v in node_uptime_histogram.items():
	print >>out, '%d %d' % (k + up_time_quanta/2, v)
out.close()

# Emit the gnuplot script; %f below is the uptime bucket width (boxwidth).
out = open('dht.gnuplot', 'w+')
out.write('''
set term png size 1200,700 small
set output "dht_announce_distribution.png"
set title "bucket # announces are made against relative to target node-id"
set ylabel "# of announces"
set style fill solid border -1 pattern 2
plot "dht_announce_distribution.dat" using 1:2 title "announces" with boxes
set terminal postscript
set output "dht_announce_distribution.ps"
replot
set term png size 1200,700 small
set output "dht_node_uptime_distribution.png"
set title "node up time"
set ylabel "# of nodes"
set xlabel "uptime (seconds)"
set boxwidth %f
set style fill solid border -1 pattern 2
plot "dht_node_uptime_distribution.dat" using 1:2 title "nodes" with boxes
''' % up_time_quanta)
out.close()

os.system('gnuplot dht.gnuplot');
| {
"content_hash": "20703a21856aaf09040dcf2d0b54ed2f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 24.608695652173914,
"alnum_prop": 0.6743227326266196,
"repo_name": "chongyc/libtorrent",
"id": "51af6525e4af1d914fe1d99f15a0d237719aef42",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse_dht_log.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "301335"
},
{
"name": "C++",
"bytes": "1443409"
},
{
"name": "Python",
"bytes": "25676"
},
{
"name": "Shell",
"bytes": "543"
}
],
"symlink_target": ""
} |
from flask import request
from indico.modules.admin.views import WPAdmin
from indico.modules.users import User
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin
class WPUser(WPJinjaMixin, WPDecorated):
    """Base WP for user profile pages.

    Whenever you use this, you MUST include `user` in the params passed to
    `render_template`. Any RH using this should inherit from `RHUserBase`
    which already handles user/admin access. In this case, simply add
    ``user=self.user`` to your `render_template` call.
    """

    template_prefix = 'users/'

    def __init__(self, rh, active_menu_item, **kwargs):
        # `active_menu_item` selects the highlighted entry of the profile menu.
        kwargs['active_menu_item'] = active_menu_item
        WPDecorated.__init__(self, rh, **kwargs)

    def _get_breadcrumbs(self):
        # When viewing someone else's profile the URL carries a user_id;
        # otherwise it is the current user's own profile.
        if 'user_id' in request.view_args:
            # NOTE(review): assumes User.get() finds the user; a stale id
            # would raise AttributeError on `.full_name` - confirm upstream
            # access checks guarantee existence.
            user = User.get(request.view_args['user_id'])
            profile_breadcrumb = _('Profile of {name}').format(name=user.full_name)
        else:
            profile_breadcrumb = _('My Profile')
        return render_breadcrumbs(profile_breadcrumb)

    def _get_body(self, params):
        return self._get_page_content(params)
class WPUserDashboard(WPUser):
    """User dashboard page; loads only the dashboard JS bundle."""

    bundles = ('module_users.dashboard.js',)
class WPUserProfilePic(WPUser):
    """Profile-picture management page with its JS and CSS bundles."""

    bundles = ('module_users.profile_picture.js', 'module_users.profile_picture.css')
class WPUserFavorites(WPUser):
    """Favorites page with its JS and CSS bundles."""

    bundles = ('module_users.favorites.js', 'module_users.favorites.css')
class WPUsersAdmin(WPAdmin):
    """Admin-area user management pages (uses the admin base WP)."""

    template_prefix = 'users/'
    bundles = ('module_users.js',)
| {
"content_hash": "78998e2bb32142e079b297efcd4c46c5",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 85,
"avg_line_length": 31.88235294117647,
"alnum_prop": 0.6875768757687577,
"repo_name": "pferreir/indico",
"id": "9bbf031071d1d8fad1d77a3586dc911454c0c605",
"size": "1840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/users/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import platform
import re
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
import oslo_messaging
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_const
from neutron.common import topics
from neutron import context
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as p_const
from neutron.plugins.hyperv.agent import utils
from neutron.plugins.hyperv.agent import utilsfactory
from neutron.plugins.hyperv.common import constants
LOG = logging.getLogger(__name__)
# Configuration options for the Hyper-V L2 agent, registered under [AGENT].
agent_opts = [
    cfg.ListOpt(
        'physical_network_vswitch_mappings',
        default=[],
        help=_('List of <physical_network>:<vswitch> '
               'where the physical networks can be expressed with '
               'wildcards, e.g.: ."*:external"')),
    cfg.StrOpt(
        'local_network_vswitch',
        default='private',
        help=_('Private vswitch name used for local networks')),
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.BoolOpt('enable_metrics_collection',
                default=False,
                help=_('Enables metrics collections for switch ports by using '
                       'Hyper-V\'s metric APIs. Collected data can by '
                       'retrieved by other apps and services, e.g.: '
                       'Ceilometer. Requires Hyper-V / Windows Server 2012 '
                       'and above')),
    cfg.IntOpt('metrics_max_retries',
               default=100,
               help=_('Specifies the maximum number of retries to enable '
                      'Hyper-V\'s port metrics collection. The agent will try '
                      'to enable the feature once every polling_interval '
                      'period for at most metrics_max_retries or until it '
                      'succeedes.'))
]

CONF = cfg.CONF
CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
class HyperVSecurityAgent(sg_rpc.SecurityGroupAgentRpc):
    """Security-groups agent; subscribes to SG updates only when a firewall
    driver is enabled."""

    def __init__(self, context, plugin_rpc):
        super(HyperVSecurityAgent, self).__init__(context, plugin_rpc)
        if sg_rpc.is_firewall_enabled():
            self._setup_rpc()

    @property
    def use_enhanced_rpc(self):
        # Enhanced (batched) security-group RPC is not supported by this agent.
        return False

    def _setup_rpc(self):
        """Subscribe to security-group update notifications."""
        self.topic = topics.AGENT
        self.endpoints = [HyperVSecurityCallbackMixin(self)]
        consumers = [[topics.SECURITY_GROUP, topics.UPDATE]]

        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)
class HyperVSecurityCallbackMixin(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """RPC endpoint receiving security-group callbacks for the agent."""

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, sg_agent):
        super(HyperVSecurityCallbackMixin, self).__init__()
        self.sg_agent = sg_agent
class HyperVNeutronAgent(object):
    """Hyper-V L2 agent.

    Binds/unbinds Neutron ports to local Hyper-V vswitches, polls the
    hypervisor for vNIC changes every CONF.AGENT.polling_interval seconds,
    and reports state to the plugin over RPC.
    """

    # Set RPC API version to 1.1 by default.
    target = oslo_messaging.Target(version='1.1')

    def __init__(self):
        super(HyperVNeutronAgent, self).__init__()
        self._utils = utilsfactory.get_hypervutils()
        self._polling_interval = CONF.AGENT.polling_interval
        self._load_physical_network_mappings()
        # net_uuid -> {'network_type', 'vswitch_name', 'ports', 'vlan_id'}
        self._network_vswitch_map = {}
        # port_id -> remaining attempts to enable metrics collection
        self._port_metric_retries = {}
        self._set_agent_state()
        self._setup_rpc()

    def _set_agent_state(self):
        # State dict sent to the plugin by _report_state().
        self.agent_state = {
            'binary': 'neutron-hyperv-agent',
            'host': cfg.CONF.host,
            'topic': n_const.L2_AGENT_TOPIC,
            'configurations': {'vswitch_mappings':
                               self._physical_network_mappings},
            'agent_type': n_const.AGENT_TYPE_HYPERV,
            'start_flag': True}

    def _report_state(self):
        """Heartbeat: report agent state; start_flag is only sent once."""
        try:
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))

    def _setup_rpc(self):
        """Create RPC clients/consumers and start the state-report loop."""
        self.agent_id = 'hyperv_%s' % platform.node()
        self.topic = topics.AGENT
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)

        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Handle updates from service
        self.endpoints = [self]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.PORT, topics.DELETE],
                     [constants.TUNNEL, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)

        self.sec_groups_agent = HyperVSecurityAgent(
            self.context, self.sg_plugin_rpc)
        report_interval = CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _load_physical_network_mappings(self):
        """Parse physical_network_vswitch_mappings into regex->vswitch."""
        self._physical_network_mappings = {}
        for mapping in CONF.AGENT.physical_network_vswitch_mappings:
            parts = mapping.split(':')
            if len(parts) != 2:
                LOG.debug('Invalid physical network mapping: %s', mapping)
            else:
                # '*' wildcards in the physical network name become '.*'.
                pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
                vswitch = parts[1].strip()
                self._physical_network_mappings[pattern] = vswitch

    def _get_vswitch_for_physical_network(self, phys_network_name):
        for pattern in self._physical_network_mappings:
            if phys_network_name is None:
                phys_network_name = ''
            if re.match(pattern, phys_network_name):
                return self._physical_network_mappings[pattern]
        # Not found in the mappings, the vswitch has the same name
        return phys_network_name

    def _get_network_vswitch_map_by_port_id(self, port_id):
        # NOTE(review): falls through and implicitly returns None when the
        # port is not bound on any known network; callers must handle that
        # (see _port_unbound).
        for network_id, map in self._network_vswitch_map.iteritems():
            if port_id in map['ports']:
                return (network_id, map)

    def network_delete(self, context, network_id=None):
        LOG.debug("network_delete received. "
                  "Deleting network %s", network_id)
        # The network may not be defined on this agent
        if network_id in self._network_vswitch_map:
            self._reclaim_local_network(network_id)
        else:
            LOG.debug("Network %s not defined on agent.", network_id)

    def port_delete(self, context, port_id=None):
        LOG.debug("port_delete received")
        self._port_unbound(port_id)

    def port_update(self, context, port=None, network_type=None,
                    segmentation_id=None, physical_network=None):
        LOG.debug("port_update received")
        if CONF.SECURITYGROUP.enable_security_group:
            if 'security_groups' in port:
                self.sec_groups_agent.refresh_firewall()

        self._treat_vif_port(
            port['id'], port['network_id'],
            network_type, physical_network,
            segmentation_id, port['admin_state_up'])

    def _get_vswitch_name(self, network_type, physical_network):
        # Local networks go to the configured private vswitch; everything
        # else resolves through the physical-network mappings.
        if network_type != p_const.TYPE_LOCAL:
            vswitch_name = self._get_vswitch_for_physical_network(
                physical_network)
        else:
            vswitch_name = CONF.AGENT.local_network_vswitch
        return vswitch_name

    def _provision_network(self, port_id,
                           net_uuid, network_type,
                           physical_network,
                           segmentation_id):
        """Record a newly-seen network in _network_vswitch_map.

        Raises HyperVException for unsupported network types.
        """
        LOG.info(_LI("Provisioning network %s"), net_uuid)

        vswitch_name = self._get_vswitch_name(network_type, physical_network)
        if network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]:
            #Nothing to do
            pass
        elif network_type == p_const.TYPE_LOCAL:
            #TODO(alexpilotti): Check that the switch type is private
            #or create it if not existing
            pass
        else:
            raise utils.HyperVException(
                msg=(_("Cannot provision unknown network type %(network_type)s"
                       " for network %(net_uuid)s") %
                     dict(network_type=network_type, net_uuid=net_uuid)))

        map = {
            'network_type': network_type,
            'vswitch_name': vswitch_name,
            'ports': [],
            'vlan_id': segmentation_id}
        self._network_vswitch_map[net_uuid] = map

    def _reclaim_local_network(self, net_uuid):
        LOG.info(_LI("Reclaiming local network %s"), net_uuid)
        del self._network_vswitch_map[net_uuid]

    def _port_bound(self, port_id,
                    net_uuid,
                    network_type,
                    physical_network,
                    segmentation_id):
        """Connect the vNIC to its vswitch, provisioning the network first."""
        LOG.debug("Binding port %s", port_id)

        if net_uuid not in self._network_vswitch_map:
            self._provision_network(
                port_id, net_uuid, network_type,
                physical_network, segmentation_id)

        map = self._network_vswitch_map[net_uuid]
        map['ports'].append(port_id)

        self._utils.connect_vnic_to_vswitch(map['vswitch_name'], port_id)

        if network_type == p_const.TYPE_VLAN:
            LOG.info(_LI('Binding VLAN ID %(segmentation_id)s '
                         'to switch port %(port_id)s'),
                     dict(segmentation_id=segmentation_id, port_id=port_id))
            self._utils.set_vswitch_port_vlan_id(
                segmentation_id,
                port_id)
        elif network_type == p_const.TYPE_FLAT:
            #Nothing to do
            pass
        elif network_type == p_const.TYPE_LOCAL:
            #Nothing to do
            pass
        else:
            LOG.error(_LE('Unsupported network type %s'), network_type)

        if CONF.AGENT.enable_metrics_collection:
            self._utils.enable_port_metrics_collection(port_id)
            self._port_metric_retries[port_id] = CONF.AGENT.metrics_max_retries

    def _port_unbound(self, port_id):
        # NOTE(review): _get_network_vswitch_map_by_port_id() returns None for
        # unknown ports, which would make this tuple unpacking raise
        # TypeError - confirm callers only pass bound ports. Also note the
        # port_id is never removed from map['ports'] here.
        (net_uuid, map) = self._get_network_vswitch_map_by_port_id(port_id)
        if net_uuid not in self._network_vswitch_map:
            LOG.info(_LI('Network %s is not avalailable on this agent'),
                     net_uuid)
            return

        LOG.debug("Unbinding port %s", port_id)
        self._utils.disconnect_switch_port(map['vswitch_name'], port_id, True)

        if not map['ports']:
            self._reclaim_local_network(net_uuid)

    def _port_enable_control_metrics(self):
        """Retry enabling metrics on bound ports, up to metrics_max_retries."""
        if not CONF.AGENT.enable_metrics_collection:
            return

        # NOTE(review): deletes entries while iterating .keys(); safe on
        # Python 2 (keys() is a list) but would raise on Python 3.
        for port_id in self._port_metric_retries.keys():
            if self._utils.can_enable_control_metrics(port_id):
                self._utils.enable_control_metrics(port_id)
                LOG.info(_LI('Port metrics enabled for port: %s'), port_id)
                del self._port_metric_retries[port_id]
            elif self._port_metric_retries[port_id] < 1:
                # Out of retries: force-enable as a last resort and log it.
                self._utils.enable_control_metrics(port_id)
                LOG.error(_LE('Port metrics raw enabling for port: %s'),
                          port_id)
                del self._port_metric_retries[port_id]
            else:
                self._port_metric_retries[port_id] -= 1

    def _update_ports(self, registered_ports):
        """Diff current vNICs against registered_ports.

        Returns None when nothing changed, else a dict with the current,
        added and removed port sets.
        """
        ports = self._utils.get_vnic_ids()
        if ports == registered_ports:
            return
        added = ports - registered_ports
        removed = registered_ports - ports
        return {'current': ports,
                'added': added,
                'removed': removed}

    def _treat_vif_port(self, port_id, network_id, network_type,
                        physical_network, segmentation_id,
                        admin_state_up):
        # Bind or unbind based on admin state; skip vNICs not on this host.
        if self._utils.vnic_port_exists(port_id):
            if admin_state_up:
                self._port_bound(port_id, network_id, network_type,
                                 physical_network, segmentation_id)
            else:
                self._port_unbound(port_id)
        else:
            LOG.debug("No port %s defined on agent.", port_id)

    def _treat_devices_added(self, devices):
        """Bind newly-seen devices. Returns True when a resync is needed."""
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context,
                devices,
                self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get ports details for "
                      "devices %(devices)s: %(e)s",
                      {'devices': devices, 'e': e})
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details['device']
            LOG.info(_LI("Adding port %s"), device)
            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: "
                             "%(device_details)s"),
                         {'device': device, 'device_details': device_details})
                self._treat_vif_port(
                    device_details['port_id'],
                    device_details['network_id'],
                    device_details['network_type'],
                    device_details['physical_network'],
                    device_details['segmentation_id'],
                    device_details['admin_state_up'])

                # check if security groups is enabled.
                # if not, teardown the security group rules
                if CONF.SECURITYGROUP.enable_security_group:
                    self.sec_groups_agent.prepare_devices_filter([device])
                else:
                    self._utils.remove_all_security_rules(
                        device_details['port_id'])
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
        return False

    def _treat_devices_removed(self, devices):
        """Unbind removed devices. Returns True when a resync is needed."""
        resync = False
        for device in devices:
            LOG.info(_LI("Removing port %s"), device)
            try:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
            except Exception as e:
                LOG.debug("Removing port failed for device %(device)s: %(e)s",
                          dict(device=device, e=e))
                resync = True
                continue
            self._port_unbound(device)
        return resync

    def _process_network_ports(self, port_info):
        resync_a = False
        resync_b = False
        if 'added' in port_info:
            resync_a = self._treat_devices_added(port_info['added'])
        if 'removed' in port_info:
            resync_b = self._treat_devices_removed(port_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

    def daemon_loop(self):
        """Main poll loop: detect vNIC changes and sync with the plugin."""
        sync = True
        ports = set()

        while True:
            try:
                start = time.time()
                if sync:
                    LOG.info(_LI("Agent out of sync with plugin!"))
                    ports.clear()
                    sync = False

                port_info = self._update_ports(ports)

                # notify plugin about port deltas
                if port_info:
                    LOG.debug("Agent loop has new devices!")
                    # If treat devices fails - must resync with plugin
                    sync = self._process_network_ports(port_info)
                    ports = port_info['current']

                self._port_enable_control_metrics()
            except Exception:
                LOG.exception(_LE("Error in agent event loop"))
                sync = True

            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self._polling_interval):
                time.sleep(self._polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)",
                          {'polling_interval': self._polling_interval,
                           'elapsed': elapsed})
def main():
    """Agent entry point: load config, set up logging, run the poll loop."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging()

    plugin = HyperVNeutronAgent()

    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    plugin.daemon_loop()
| {
"content_hash": "22b532282f0e0af898365397dfd24db2",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 79,
"avg_line_length": 39.32142857142857,
"alnum_prop": 0.5518278837420527,
"repo_name": "rdo-management/neutron",
"id": "58580f5bd43ee5978189c2804d5004a8accf30a6",
"size": "18289",
"binary": false,
"copies": "2",
"ref": "refs/heads/mgt-master",
"path": "neutron/plugins/hyperv/agent/hyperv_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24047"
},
{
"name": "Gettext Catalog",
"bytes": "575107"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "6918375"
},
{
"name": "Shell",
"bytes": "12287"
}
],
"symlink_target": ""
} |
from jnpr.junos import jxml
from jnpr.junos import jxml as JXML
from lxml.etree import _Element
from ncclient.operations.rpc import RPCError
class FactLoopError(RuntimeError):
    """Raised when a loop is detected during fact gathering."""
class RpcError(Exception):
    """
    Parent class for all junos-pyez RPC Exceptions
    """

    def __init__(self, cmd=None, rsp=None, errs=None, dev=None,
                 timeout=None, re=None):
        """
        :cmd: is the rpc command
        :rsp: is the rpc response (after <rpc-reply>)
        :errs: is a list of dictionaries of extracted <rpc-error> elements.
        :dev: is the device rpc was executed on
        :timeout: is the timeout value of the device
        :re: is the RE or member exception occured on

        Note: the parameter name `re` shadows the regex module inside this
        method; it holds routing-engine/member info, not a pattern.
        """
        self.cmd = cmd
        self.rsp = rsp
        self.dev = dev
        self.timeout = timeout
        self.re = re
        self.rpc_error = None
        # To handle errors coming from ncclient, Here errs is list of RPCError
        if isinstance(errs, RPCError) and hasattr(errs, 'errors'):
            self.errs = [JXML.rpc_error(error.xml) for error in errs.errors]
            for error in errs.errors:
                if error.severity == 'error':
                    self.rsp = JXML.remove_namespaces(error.xml)
                    break
            else:
                # for/else: no severity=='error' entry was found; fall back
                # to the first warning, if any.
                if errs.severity == 'warning':
                    for error in errs.errors:
                        if error.severity == 'warning':
                            self.rsp = JXML.remove_namespaces(error.xml)
                            break
            self.message = errs.message
        else:
            # errs is already a list of extracted <rpc-error> dicts (or None);
            # build a "severity: message" line per entry.
            self.errs = errs
            self.message = "\n".join(["%s: %s" % (err['severity'].strip(),
                                                  err['message'].strip())
                                      for err in errs if err['message'] is not None
                                      and err['severity'] is not None]) \
                if isinstance(errs, list) else ''

        # When a raw XML response element is available, extract a normalized
        # error dict from it and use it to fill any missing fields.
        if isinstance(self.rsp, _Element):
            self.rpc_error = jxml.rpc_error(self.rsp)
            self.message = self.message or self.rpc_error['message']
            if self.errs is None or not isinstance(self.errs, list):
                self.errs = [self.rpc_error]

    def __repr__(self):
        """
        pprints the response XML attribute
        """
        if self.rpc_error is not None:
            return "{0}(severity: {1}, bad_element: {2}, message: {3})"\
                .format(self.__class__.__name__, self.rpc_error['severity'],
                        self.rpc_error['bad_element'], self.message)
        else:
            return self.__class__.__name__

    __str__ = __repr__
class CommitError(RpcError):
    """
    Generated in response to a commit-check or a commit action.
    """

    def __init__(self, rsp, cmd=None, errs=None):
        """
        :rsp: the rpc response element
        :cmd: the rpc command that was executed
        :errs: extracted <rpc-error> information (see RpcError)
        """
        RpcError.__init__(self, cmd, rsp, errs)

    def __repr__(self):
        # Fix: the parent class guards against rpc_error being None (it is
        # None whenever rsp was not an lxml _Element); the original body
        # here indexed rpc_error unconditionally and could raise TypeError
        # from repr().  Mirror the parent's guard.
        if self.rpc_error is not None:
            return "{0}(edit_path: {1}, bad_element: {2}, message: {3})"\
                .format(self.__class__.__name__, self.rpc_error['edit_path'],
                        self.rpc_error['bad_element'], self.message)
        return self.__class__.__name__

    __str__ = __repr__
class ConfigLoadError(RpcError):
    """
    Generated in response to a failure when loading a configuration.
    """

    def __init__(self, rsp, cmd=None, errs=None):
        """
        :rsp: the rpc response element
        :cmd: the rpc command that was executed
        :errs: extracted <rpc-error> information (see RpcError)
        """
        RpcError.__init__(self, cmd, rsp, errs)

    def __repr__(self):
        # Fix: the parent class guards against rpc_error being None (it is
        # None whenever rsp was not an lxml _Element); the original body
        # here indexed rpc_error unconditionally and could raise TypeError
        # from repr().  Mirror the parent's guard.
        if self.rpc_error is not None:
            return "{0}(severity: {1}, bad_element: {2}, message: {3})"\
                .format(self.__class__.__name__, self.rpc_error['severity'],
                        self.rpc_error['bad_element'], self.message)
        return self.__class__.__name__

    __str__ = __repr__
class LockError(RpcError):
    """
    Raised when an attempt to take the exclusive lock on the
    configuration database fails.
    """

    def __init__(self, rsp):
        super(LockError, self).__init__(rsp=rsp)
class UnlockError(RpcError):
    """
    Raised when an attempt to release the configuration-database
    lock fails.
    """

    def __init__(self, rsp):
        super(UnlockError, self).__init__(rsp=rsp)
class PermissionError(RpcError):
    """
    Raised when an RPC is invoked for which the authenticated user lacks
    the required user-class permissions.  ``message`` carries the name of
    the denied RPC, taken from <bad-element> in the response.

    NOTE: within this module the name intentionally shadows the builtin
    ``PermissionError``.
    """

    def __init__(self, rsp, cmd=None, errs=None):
        super(PermissionError, self).__init__(cmd=cmd, rsp=rsp, errs=errs)
        # The denied RPC name is reported in <bad-element>.
        self.message = rsp.findtext('.//bad-element')
class RpcTimeoutError(RpcError):
    """
    Raised when an RPC does not complete within the device timeout.
    """

    def __init__(self, dev, cmd, timeout):
        super(RpcTimeoutError, self).__init__(dev=dev, cmd=cmd,
                                              timeout=timeout)

    def __repr__(self):
        return "{0}(host: {1}, cmd: {2}, timeout: {3})".format(
            self.__class__.__name__, self.dev.hostname, self.cmd,
            self.timeout)

    __str__ = __repr__
class SwRollbackError(RpcError):
    """
    Raised when a software rollback fails.
    """

    def __init__(self, rsp, re=None):
        super(SwRollbackError, self).__init__(re=re, rsp=rsp)

    def __repr__(self):
        name = self.__class__.__name__
        if self.re:
            return "{0}(re: {1}, output: {2})".format(name, self.re,
                                                      self.rsp)
        return "{0}(output: {1})".format(name, self.rsp)

    __str__ = __repr__
# ================================================================
# ================================================================
# Connection Exceptions
# ================================================================
# ================================================================
class ConnectError(Exception):
    """
    Parent class for all connection related exceptions
    """

    def __init__(self, dev, msg=None):
        self.dev = dev
        self._orig = msg

    @property
    def user(self):
        """Login user name."""
        return self.dev.user

    @property
    def host(self):
        """Login host name / IP address."""
        return self.dev.hostname

    @property
    def port(self):
        """Login SSH port."""
        return self.dev._port

    @property
    def msg(self):
        """Original error message supplied at construction, if any."""
        return self._orig

    def __repr__(self):
        name = self.__class__.__name__
        if self._orig:
            return "{0}(host: {1}, msg: {2})".format(name,
                                                     self.dev.hostname,
                                                     self._orig)
        return "{0}({1})".format(name, self.dev.hostname)

    __str__ = __repr__
class ProbeError(ConnectError):
    """
    Raised when auto_probe is enabled and the probe action fails.
    """
    pass
class ConnectAuthError(ConnectError):
    """
    Raised when the supplied user-name/password is rejected.
    """
    pass
class ConnectTimeoutError(ConnectError):
    """
    Raised when the NETCONF session fails to connect.  Possible causes
    include the device not being IP reachable, a bad ip address, or a
    routing problem.
    """
    pass
class ConnectUnknownHostError(ConnectError):
    """
    Raised when the given hostname fails to resolve in DNS.
    """
    pass
class ConnectRefusedError(ConnectError):
    """
    Raised when the host refuses the NETCONF connection; the service may
    not be enabled, or the host may already have too many connections.
    """
    pass
class ConnectNotMasterError(ConnectError):
    """
    Raised when the connection lands on a non-master routing-engine —
    for example a backup RE on an MX device, or a virtual-chassis
    member (linecard).
    """
    pass
class ConnectClosedError(ConnectError):
    """
    Raised when the connection closes unexpectedly.  As a side effect
    the device object is flagged as disconnected.
    """

    def __init__(self, dev):
        super(ConnectClosedError, self).__init__(dev=dev)
        # Mark the device as no longer connected.
        dev.connected = False
| {
"content_hash": "24f3dad606b386f67e5faa9979d1b169",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 83,
"avg_line_length": 27.306397306397308,
"alnum_prop": 0.5273736128236745,
"repo_name": "spidercensus/py-junos-eznc",
"id": "38651ef0011bd725c2c105992fbbf4e0102167f9",
"size": "8110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/jnpr/junos/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Pascal",
"bytes": "408"
},
{
"name": "Puppet",
"bytes": "2263"
},
{
"name": "Python",
"bytes": "689193"
},
{
"name": "Ruby",
"bytes": "4840"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
} |
import json
from heatclient import exc
from heatclient.v1 import client as v1client
from keystoneclient.v2_0 import client as ksclient
def script_keystone_client():
    """Record (mox) the expected keystone client construction."""
    fake = FakeKeystone('abcd1234')
    ksclient.Client(auth_url='http://no.where',
                    insecure=False,
                    password='password',
                    tenant_id='',
                    tenant_name='tenant_name',
                    username='username').AndReturn(fake)
def script_heat_list():
    """Record (mox) a stack-list request returning two canned stacks."""
    stacks = [
        {"id": "1",
         "stack_name": "teststack",
         "stack_status": 'CREATE_COMPLETE',
         "creation_time": "2012-10-25T01:58:47Z"},
        {"id": "2",
         "stack_name": "teststack2",
         "stack_status": 'IN_PROGRESS',
         "creation_time": "2012-10-25T01:58:47Z"},
    ]
    resp_dict = {"stacks": stacks}
    resp = FakeHTTPResponse(200,
                            'success, you',
                            {'content-type': 'application/json'},
                            json.dumps(resp_dict))
    v1client.Client.json_request('GET',
                                 '/stacks?').AndReturn((resp, resp_dict))
def script_heat_normal_error():
    """Record (mox) a stack GET that raises a structured 404-style error."""
    error = {
        "message": "The Stack (bad) could not be found.",
        "type": "StackNotFound",
        "traceback": "",
    }
    resp_dict = {
        "explanation": "The resource could not be found.",
        "code": 404,
        "error": error,
        "title": "Not Found"
    }
    body = json.dumps(resp_dict)
    resp = FakeHTTPResponse(400,
                            'The resource could not be found',
                            {'content-type': 'application/json'},
                            body)
    v1client.Client.json_request('GET', '/stacks/bad').AndRaise(
        exc.from_response(resp, body))
def script_heat_error(resp_string):
    """Record (mox) a stack GET that raises from the given raw body."""
    headers = {'content-type': 'application/json'}
    resp = FakeHTTPResponse(400,
                            'The resource could not be found',
                            headers,
                            resp_string)
    error = exc.from_response(resp, resp_string)
    v1client.Client.json_request('GET', '/stacks/bad').AndRaise(error)
def fake_headers():
    """Return the request headers an authenticated client would send."""
    headers = {}
    headers['X-Auth-Token'] = 'abcd1234'
    headers['Content-Type'] = 'application/json'
    headers['Accept'] = 'application/json'
    headers['User-Agent'] = 'python-heatclient'
    return headers
class FakeServiceCatalog():
    """Stand-in for a keystone service catalog with one fixed endpoint."""

    def url_for(self, endpoint_type, service_type):
        """Return the canned heat endpoint regardless of the arguments."""
        return 'http://192.168.1.5:8004/v1/f14b41234'
class FakeKeystone():
    """Minimal keystone-client double exposing a token and a catalog."""

    # One canned service catalog shared by every fake client.
    service_catalog = FakeServiceCatalog()

    def __init__(self, auth_token):
        self.auth_token = auth_token
class FakeHTTPResponse():
    """In-memory stand-in for an httplib-style response object."""

    # Mimic an HTTP/1.1 response.
    version = 1.1

    def __init__(self, status, reason, headers, body):
        self.headers = headers
        self.body = body
        self.status = status
        self.reason = reason

    def getheader(self, name, default=None):
        """Return a single header value, or *default* when absent."""
        return self.headers.get(name, default)

    def getheaders(self):
        """Return all headers as (name, value) pairs."""
        return self.headers.items()

    def read(self, amt=None):
        """Return the whole body once; subsequent reads return None."""
        remaining, self.body = self.body, None
        return remaining
| {
"content_hash": "fa35e5060baff348fb50d387154dc972",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 76,
"avg_line_length": 29.826923076923077,
"alnum_prop": 0.5341715022566086,
"repo_name": "citrix-openstack-build/python-heatclient",
"id": "199dad9f8695f29dea41cc1cc5a4f0c9ddf8724d",
"size": "3648",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "heatclient/tests/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "131491"
},
{
"name": "Shell",
"bytes": "1123"
}
],
"symlink_target": ""
} |
"""
Installs and configures nova
"""
import os
import uuid
import logging
import platform
from packstack.installer import processors, utils, validators
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.ospluginutils import NovaConfig, getManifestTemplate, appendManifestFile, manifestfiles
# Controller object will be initialized from main flow
controller = None
PLUGIN_NAME = "OS-NOVA"
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
    """Register the Nova parameter groups with the installer controller.

    Saves *controllerObject* into the module-global ``controller`` and
    registers three option groups: NOVA (core services), NOVA_NETWORK
    (nova-network options, used only when neutron is not installed) and
    NOVA_NETWORK_VLAN (VlanManager-specific options).
    """
    global controller
    controller = controllerObject

    # Fedora exposes biosdevname-style NIC names (em1/em2); the other
    # supported distributions default to ethN.
    if platform.linux_distribution()[0] == "Fedora":
        primary_netif = "em1"
        secondary_netif = "em2"
    else:
        primary_netif = "eth0"
        secondary_netif = "eth1"

    # Option definitions, keyed by group name.
    nova_params = {
        "NOVA" : [
            {"CMD_OPTION" : "novaapi-host",
             "USAGE" : "The IP address of the server on which to install the Nova API service",
             "PROMPT" : "Enter the IP address of the Nova API service",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_API_HOST",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novacert-host",
             "USAGE" : "The IP address of the server on which to install the Nova Cert service",
             "PROMPT" : "Enter the IP address of the Nova Cert service",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_CERT_HOST",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novavncproxy-hosts",
             "USAGE" : "The IP address of the server on which to install the Nova VNC proxy",
             "PROMPT" : "Enter the IP address of the Nova VNC proxy",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_VNCPROXY_HOST",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novacompute-hosts",
             "USAGE" : "A comma separated list of IP addresses on which to install the Nova Compute services",
             "PROMPT" : "Enter a comma separated list of IP addresses on which to install the Nova Compute services",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty, validators.validate_multi_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_COMPUTE_HOSTS",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novaconductor-host",
             "USAGE" : "The IP address of the server on which to install the Nova Conductor service",
             "PROMPT" : "Enter the IP address of the Nova Conductor service",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_CONDUCTOR_HOST",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            # Generated passwords default to a random 16-hex-char value.
            {"CMD_OPTION" : "nova-db-passwd",
             "USAGE" : "The password to use for the Nova to access DB",
             "PROMPT" : "Enter the password for the Nova DB access",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : uuid.uuid4().hex[:16],
             "MASK_INPUT" : True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME" : "CONFIG_NOVA_DB_PW",
             "USE_DEFAULT" : True,
             "NEED_CONFIRM" : True,
             "CONDITION" : False },
            {"CMD_OPTION" : "nova-ks-passwd",
             "USAGE" : "The password to use for the Nova to authenticate with Keystone",
             "PROMPT" : "Enter the password for the Nova Keystone access",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : uuid.uuid4().hex[:16],
             "MASK_INPUT" : True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME" : "CONFIG_NOVA_KS_PW",
             "USE_DEFAULT" : True,
             "NEED_CONFIRM" : True,
             "CONDITION" : False },
            {"CMD_OPTION" : "novasched-host",
             "USAGE" : "The IP address of the server on which to install the Nova Scheduler service",
             "PROMPT" : "Enter the IP address of the Nova Scheduler service",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_SCHED_HOST",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novasched-cpu-allocation-ratio",
             "USAGE" : "The overcommitment ratio for virtual to physical CPUs. "
                       "Set to 1.0 to disable CPU overcommitment",
             "PROMPT" : "Enter the CPU overcommitment ratio. "
                        "Set to 1.0 to disable CPU overcommitment",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_float],
             "DEFAULT_VALUE" : 16.0,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novasched-ram-allocation-ratio",
             "USAGE" : "The overcommitment ratio for virtual to physical RAM. "
                       "Set to 1.0 to disable RAM overcommitment",
             "PROMPT" : "Enter the RAM overcommitment ratio. "
                        "Set to 1.0 to disable RAM overcommitment",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_float],
             "DEFAULT_VALUE" : 1.5,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            ],

        # Options used only when nova-network (not neutron) is deployed.
        "NOVA_NETWORK" : [
            {"CMD_OPTION" : "novacompute-privif",
             "USAGE" : "Private interface for Flat DHCP on the Nova compute servers",
             "PROMPT" : "Enter the Private interface for Flat DHCP on the Nova compute servers",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : secondary_netif,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_COMPUTE_PRIVIF",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-host",
             "USAGE" : "The IP address of the server on which to install the Nova Network service",
             "PROMPT" : "Enter the IP address of the Nova Network service",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
             "DEFAULT_VALUE" : utils.get_localhost_ip(),
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_HOST",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-manager",
             "USAGE" : "Nova network manager",
             "PROMPT" : "Enter the Nova network manager",
             "OPTION_LIST" : [r'^nova\.network\.manager\.\w+Manager$'],
             "VALIDATORS" : [validators.validate_regexp],
             "DEFAULT_VALUE" : "nova.network.manager.FlatDHCPManager",
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_MANAGER",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-pubif",
             "USAGE" : "Public interface on the Nova network server",
             "PROMPT" : "Enter the Public interface on the Nova network server",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : primary_netif,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_PUBIF",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-privif",
             "USAGE" : "Private interface for network manager on the Nova network server",
             "PROMPT" : "Enter the Private interface for network manager on the Nova network server",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : secondary_netif,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_PRIVIF",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-fixed-range",
             "USAGE" : "IP Range for network manager",
             "PROMPT" : "Enter the IP Range for network manager",
             "OPTION_LIST" : ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
             "PROCESSORS" : [processors.process_cidr],
             "VALIDATORS" : [validators.validate_regexp],
             "DEFAULT_VALUE" : "192.168.32.0/22",
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_FIXEDRANGE",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-floating-range",
             "USAGE" : "IP Range for Floating IP's",
             "PROMPT" : "Enter the IP Range for Floating IP's",
             "OPTION_LIST" : ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
             "PROCESSORS" : [processors.process_cidr],
             "VALIDATORS" : [validators.validate_regexp],
             "DEFAULT_VALUE" : "10.3.4.0/22",
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_FLOATRANGE",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-default-floating-pool",
             "USAGE" : "Name of the default floating pool to which the specified floating ranges are added to",
             "PROMPT" : "What should the default floating pool be called?",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : "nova",
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-auto-assign-floating-ip",
             "USAGE" : "Automatically assign a floating IP to new instances",
             "PROMPT" : "Should new instances automatically have a floating IP assigned?",
             "OPTION_LIST" : ["y", "n"],
             "VALIDATORS" : [validators.validate_options],
             "DEFAULT_VALUE" : "n",
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            ],

        # Options used only with nova.network.manager.VlanManager.
        "NOVA_NETWORK_VLAN" : [
            {"CMD_OPTION" : "novanetwork-vlan-start",
             "USAGE" : "First VLAN for private networks",
             "PROMPT" : "Enter first VLAN for private networks",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : 100,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_VLAN_START",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-num-networks",
             "USAGE" : "Number of networks to support",
             "PROMPT" : "How many networks should be supported",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : 1,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_NUMBER",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            {"CMD_OPTION" : "novanetwork-network-size",
             "USAGE" : "Number of addresses in each private subnet",
             "PROMPT" : "How many addresses should be in each private subnet",
             "OPTION_LIST" : [],
             "VALIDATORS" : [validators.validate_not_empty],
             "DEFAULT_VALUE" : 255,
             "MASK_INPUT" : False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME" : "CONFIG_NOVA_NETWORK_SIZE",
             "USE_DEFAULT" : False,
             "NEED_CONFIRM" : False,
             "CONDITION" : False },
            ],
        }

    def use_nova_network(config):
        # nova-network applies only when Nova is installed and neutron is not.
        return config['CONFIG_NOVA_INSTALL'] == 'y' and \
            config['CONFIG_NEUTRON_INSTALL'] != 'y'

    def use_nova_network_vlan(config):
        # VLAN options apply only when nova-network uses the VlanManager.
        manager = 'nova.network.manager.VlanManager'
        return config['CONFIG_NOVA_INSTALL'] == 'y' and \
            config['CONFIG_NEUTRON_INSTALL'] != 'y' and \
            config['CONFIG_NOVA_NETWORK_MANAGER'] == manager

    nova_groups = [
        {"GROUP_NAME" : "NOVA",
         "DESCRIPTION" : "Nova Options",
         "PRE_CONDITION" : "CONFIG_NOVA_INSTALL",
         "PRE_CONDITION_MATCH" : "y",
         "POST_CONDITION" : False,
         "POST_CONDITION_MATCH" : True},
        {"GROUP_NAME" : "NOVA_NETWORK",
         "DESCRIPTION" : "Nova Network Options",
         "PRE_CONDITION" : use_nova_network,
         "PRE_CONDITION_MATCH" : True,
         "POST_CONDITION" : False,
         "POST_CONDITION_MATCH" : True},
        {"GROUP_NAME" : "NOVA_NETWORK_VLAN",
         "DESCRIPTION" : "Nova Network VLAN Options",
         "PRE_CONDITION" : use_nova_network_vlan,
         "PRE_CONDITION_MATCH" : True,
         "POST_CONDITION" : False,
         "POST_CONDITION_MATCH" : True},
        ]
    # Register every group together with its parameter list.
    for group in nova_groups:
        paramList = nova_params[group["GROUP_NAME"]]
        controller.addGroup(group, paramList)
def initSequences(controller):
    """Register the Nova installation step sequence, if Nova is selected."""
    if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
        return

    steps = [
        ('Adding Nova API manifest entries', createapimanifest),
        ('Adding Nova Keystone manifest entries', createkeystonemanifest),
        ('Adding Nova Cert manifest entries', createcertmanifest),
        ('Adding Nova Conductor manifest entries', createconductormanifest),
        ('Adding Nova Compute manifest entries', createcomputemanifest),
        ('Adding Nova Scheduler manifest entries', createschedmanifest),
        ('Adding Nova VNC Proxy manifest entries', createvncproxymanifest),
        ('Adding Nova Common manifest entries', createcommonmanifest),
    ]
    # Networking is provided either by neutron or by nova-network.
    if controller.CONF['CONFIG_NEUTRON_INSTALL'] == 'y':
        steps.append(('Adding Openstack Network-related Nova manifest entries',
                      createneutronmanifest))
    else:
        steps.append(('Adding Nova Network manifest entries',
                      createnetworkmanifest))
    novaapisteps = [{'title': title, 'functions': [func]}
                    for title, func in steps]
    controller.addSequence("Installing OpenStack Nova API", [], [],
                           novaapisteps)
def createapimanifest(config):
    """Append the Nova API manifest for the configured API host."""
    # The nova puppet plugin keys neutron metadata-proxy support off the
    # existence of the metadata password, so nova_api.pp needs a bare
    # (unquoted) 'undef' when neutron is not being installed.
    if controller.CONF['CONFIG_NEUTRON_INSTALL'] != 'y':
        pw_unquoted = 'undef'
    else:
        pw_unquoted = "'%s'" % controller.CONF['CONFIG_NEUTRON_METADATA_PW']
    controller.CONF['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = pw_unquoted
    manifestfile = "%s_api_nova.pp" % controller.CONF['CONFIG_NOVA_API_HOST']
    manifestdata = getManifestTemplate("nova_api.pp")
    appendManifestFile(manifestfile, manifestdata, 'novaapi')
def createkeystonemanifest(config):
    """Append the Nova keystone manifest on the keystone host."""
    host = controller.CONF['CONFIG_KEYSTONE_HOST']
    appendManifestFile("%s_keystone.pp" % host,
                       getManifestTemplate("keystone_nova.pp"))
def createcertmanifest(config):
    """Append the Nova Cert manifest on the cert host."""
    host = controller.CONF['CONFIG_NOVA_CERT_HOST']
    appendManifestFile("%s_nova.pp" % host,
                       getManifestTemplate("nova_cert.pp"))
def createconductormanifest(config):
    """Append the Nova Conductor manifest on the conductor host."""
    host = controller.CONF['CONFIG_NOVA_CONDUCTOR_HOST']
    appendManifestFile("%s_nova.pp" % host,
                       getManifestTemplate("nova_conductor.pp"))
def check_ifcfg(host, device):
    """
    Raise ScriptRuntimeError when *host* does not have network *device*.
    """
    runner = utils.ScriptRunner(host)
    runner.append(
        "ip addr show dev %s || ( echo Device %s does not exist && exit 1 )"
        % (device, device))
    runner.execute()
def bring_up_ifcfg(host, device):
    """
    Bring *device* up on *host* if it is currently down.  Raises
    ScriptRuntimeError when the interface cannot be brought up.
    """
    runner = utils.ScriptRunner(host)
    runner.append('ip link show up | grep "%s"' % device)
    try:
        runner.execute()
        return  # already up, nothing to do
    except ScriptRuntimeError:
        runner.clear()

    runner.append('ip link set dev %s up' % device)
    try:
        runner.execute()
    except ScriptRuntimeError:
        raise ScriptRuntimeError(
            'Failed to bring up network interface %s on host %s.'
            ' Interface should be up so Openstack can work'
            ' properly.' % (device, host))
def createcomputemanifest(config):
    """Append a per-host nova-compute manifest for every compute host."""
    # CONFIG_NOVA_COMPUTE_HOSTS is a comma-separated list; drop blanks.
    dirty = controller.CONF["CONFIG_NOVA_COMPUTE_HOSTS"].split(",")
    hostlist = [i.strip() for i in dirty if i.strip()]
    for host in hostlist:
        # Templates read CONFIG_NOVA_COMPUTE_HOST, so rebind it per host.
        controller.CONF["CONFIG_NOVA_COMPUTE_HOST"] = host
        manifestdata = getManifestTemplate("nova_compute.pp")
        # Add cinder-backend glue when cinder is installed.
        if controller.CONF['CONFIG_CINDER_INSTALL'] == 'y' and controller.CONF['CONFIG_CINDER_BACKEND'] == 'gluster':
            manifestdata += getManifestTemplate("nova_gluster.pp")
        if controller.CONF['CONFIG_CINDER_INSTALL'] == 'y' and controller.CONF['CONFIG_CINDER_BACKEND'] == 'nfs':
            manifestdata += getManifestTemplate("nova_nfs.pp")
        manifestfile = "%s_nova.pp"%host

        nova_config_options = NovaConfig()
        # nova-network only: set flat_interface and make sure the private
        # interface exists and is up on the compute host.
        if controller.CONF['CONFIG_NEUTRON_INSTALL'] != 'y':
            if host != controller.CONF["CONFIG_NOVA_NETWORK_HOST"]:
                nova_config_options.addOption("DEFAULT/flat_interface", controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
            check_ifcfg(host, controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
            try:
                bring_up_ifcfg(host, controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
            except ScriptRuntimeError as ex:
                # just warn user to do it by himself
                controller.MESSAGES.append(str(ex))

        if controller.CONF['CONFIG_CEILOMETER_INSTALL'] == 'y':
            manifestdata += getManifestTemplate("nova_ceilometer.pp")

        appendManifestFile(manifestfile, manifestdata + "\n" + nova_config_options.getManifestEntry())
def createnetworkmanifest(config):
    """Append the nova-network manifest; no-op when neutron is installed.

    NOTE(review): this function mutates both ``controller.CONF`` and the
    passed-in ``config`` mapping — presumably they refer to the same
    store; confirm against the installer controller.
    """
    if controller.CONF['CONFIG_NEUTRON_INSTALL'] == "y":
        return
    host = controller.CONF['CONFIG_NOVA_NETWORK_HOST']
    # Both interfaces must exist; try to bring each one up.
    for i in ('CONFIG_NOVA_NETWORK_PRIVIF', 'CONFIG_NOVA_NETWORK_PUBIF'):
        check_ifcfg(host, controller.CONF[i])
        try:
            bring_up_ifcfg(host, controller.CONF[i])
        except ScriptRuntimeError as ex:
            # just warn user to do it by himself
            controller.MESSAGES.append(str(ex))

    # Convert the y/n answer into the boolean the template expects.
    if controller.CONF['CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'] == "y":
        controller.CONF['CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'] = True
    else:
        controller.CONF['CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'] = False

    # We need to explicitly set the network size
    routing_prefix = controller.CONF['CONFIG_NOVA_NETWORK_FIXEDRANGE'].split('/')[1]
    net_size = 2**(32 - int(routing_prefix))
    controller.CONF['CONFIG_NOVA_NETWORK_FIXEDSIZE'] = str(net_size)

    # Default VLAN parameters to avoid KeyError exceptions in case of
    # VlanManager is not used
    vlan_manager = 'nova.network.manager.VlanManager'
    if config['CONFIG_NOVA_NETWORK_MANAGER'] != vlan_manager:
        config['CONFIG_NOVA_NETWORK_VLAN_START'] = 100
        config['CONFIG_NOVA_NETWORK_SIZE'] = 255
        config['CONFIG_NOVA_NETWORK_NUMBER'] = 1

    manifestfile = "%s_nova.pp" % host
    manifestdata = getManifestTemplate("nova_network.pp")
    appendManifestFile(manifestfile, manifestdata)
def createschedmanifest(config):
    """Append the Nova Scheduler manifest on the scheduler host."""
    host = controller.CONF['CONFIG_NOVA_SCHED_HOST']
    appendManifestFile("%s_nova.pp" % host,
                       getManifestTemplate("nova_sched.pp"))
def createvncproxymanifest(config):
    """Append the Nova VNC proxy manifest on the proxy host."""
    host = controller.CONF['CONFIG_NOVA_VNCPROXY_HOST']
    appendManifestFile("%s_nova.pp" % host,
                       getManifestTemplate("nova_vncproxy.pp"))
def createcommonmanifest(config):
    """Append the common nova manifest to every per-host *_nova.pp file.

    Hosts that run only nova-compute get a DB connection string without a
    password; all other nova hosts get the full credentials.
    """
    dbhost = config['CONFIG_MYSQL_HOST']
    dirty = controller.CONF["CONFIG_NOVA_COMPUTE_HOSTS"].split(",")
    nopass_nodes = [i.strip() for i in dirty if i.strip()]
    dirty = [config.get('CONFIG_NOVA_CONDUCTOR_HOST'),
             config.get('CONFIG_NOVA_API_HOST'),
             config.get('CONFIG_NOVA_CERT_HOST'),
             config.get('CONFIG_NOVA_VNCPROXY_HOST'),
             config.get('CONFIG_NOVA_SCHED_HOST'),
             config.get('CONFIG_NOVA_NETWORK_HOST')]
    dbpass_nodes = [i.strip() for i in dirty if i and i.strip()]
    for manifestfile, marker in manifestfiles.getFiles():
        if manifestfile.endswith("_nova.pp"):
            # Manifest files are named "<host>_nova.pp".
            host, manifest = manifestfile.split('_', 1)
            host = host.strip()
            if host in nopass_nodes and host not in dbpass_nodes:
                # we should omit password in case we are installing only
                # nova-compute to the host
                perms = "nova"
            else:
                perms = "nova:%(CONFIG_NOVA_DB_PW)s" % config
            # Rebind per host: the template reads CONFIG_NOVA_SQL_CONN.
            config['CONFIG_NOVA_SQL_CONN'] = ("mysql://%s@%s/nova"
                                              % (perms, dbhost))
            data = getManifestTemplate("nova_common.pp")
            appendManifestFile(os.path.split(manifestfile)[1], data)
def createneutronmanifest(config):
    """Append nova<->neutron glue to every host's nova manifest."""
    if controller.CONF['CONFIG_NEUTRON_INSTALL'] != "y":
        return

    # Pick the libvirt VIF driver matching the configured L2 plugin.
    if controller.CONF['CONFIG_NEUTRON_L2_PLUGIN'] == 'openvswitch':
        vif_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver'
    else:
        vif_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
    controller.CONF['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = vif_driver

    for manifestfile, marker in manifestfiles.getFiles():
        if manifestfile.endswith("_nova.pp"):
            data = getManifestTemplate("nova_neutron.pp")
            appendManifestFile(os.path.split(manifestfile)[1], data)
| {
"content_hash": "ceb73bfa805e05bfe957516f57981542",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 133,
"avg_line_length": 51.0036036036036,
"alnum_prop": 0.5121701345956831,
"repo_name": "pkilambi/packstack",
"id": "b037a6620a2235c381c7f818be89afee21a69fda",
"size": "28307",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packstack/plugins/nova_300.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "40368"
},
{
"name": "Python",
"bytes": "334375"
},
{
"name": "Ruby",
"bytes": "15291"
},
{
"name": "Shell",
"bytes": "459"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from exam import fixture
from django.core.urlresolvers import reverse
from sentry.models import ProjectOption
from sentry.testutils import TestCase
class ManageProjectPluginsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-manage-project-plugins', args=[
self.organization.slug, self.project.slug])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_renders_with_required_context(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed('sentry/projects/plugins/manage.html')
def test_saves_settings(self):
self.login_as(self.user)
resp = self.client.post(self.path, {
'plugin': ['os', 'urls'],
})
assert resp.status_code == 302
opts = dict(
(p.key, p.value)
for p in ProjectOption.objects.filter(
project=self.project,
key__in=[
'auto_tag:_operating_systems:enabled', 'auto_tag:_urls:enabled',
'mail:enabled',
],
),
)
print opts
assert opts.get('auto_tag:_operating_systems:enabled') is True
assert opts.get('auto_tag:_urls:enabled') is True
assert opts.get('mail:enabled') is False
| {
"content_hash": "6a333869ba218ec4ed5a7e7b5bc96fc8",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 30.25,
"alnum_prop": 0.6081267217630854,
"repo_name": "daevaorn/sentry",
"id": "9837585d7951e11e004709e9036db84a4254a91a",
"size": "1452",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/sentry/web/frontend/test_project_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174905"
},
{
"name": "HTML",
"bytes": "200247"
},
{
"name": "JavaScript",
"bytes": "618375"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "8680827"
},
{
"name": "Shell",
"bytes": "746"
}
],
"symlink_target": ""
} |
"""Cleans up old trace from Treadmill."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
import click
from treadmill.apptrace import zk
from treadmill import context
from treadmill import zknamespace as z
from treadmill import zkutils
_LOGGER = logging.getLogger(__name__)
# Interval between cleanup runs - every minute.
TRACE_CLEANUP_INTERVAL = 60
# Default max service trace events count.
TRACE_SERVICE_EVENTS_MAX_COUNT = 10
# Number of trace entries processed per cleanup batch.
TRACE_BATCH = 5000
# Default trace expiration - 5min.
TRACE_EXPIRE_AFTER = 5 * 60
# Number of finished entries processed per cleanup batch.
FINISHED_BATCH = 5000
# Default finished-entry expiration - 5min.
FINISHED_EXPIRE_AFTER = 5 * 60
# Default max finished history count.
FINISHED_HISTORY_MAX_COUNT = 100
# Default max trace history count.
TRACE_HISTORY_MAX_COUNT = 100
def init():
    """Top level command handler.

    Builds and returns the ``trace`` click command group with its
    ``cleanup`` subcommand registered.
    """
    @click.group()
    def trace():
        """Manage Treadmill traces."""
        pass
    @trace.command()
    @click.option('--interval', help='Timeout between checks (sec).',
                  default=TRACE_CLEANUP_INTERVAL)
    @click.option('--trace-service-events-max-count',
                  help='Max service trace events (running/exited) to keep.',
                  type=int, default=TRACE_SERVICE_EVENTS_MAX_COUNT)
    @click.option('--trace-batch-size', help='Batch size.',
                  type=int, default=TRACE_BATCH)
    @click.option('--trace-expire-after', help='Expire after (sec).',
                  type=int, default=TRACE_EXPIRE_AFTER)
    @click.option('--trace-history-max-count',
                  help='Max trace history to keep.',
                  type=int, default=TRACE_HISTORY_MAX_COUNT)
    @click.option('--finished-batch-size', help='Batch size.',
                  type=int, default=FINISHED_BATCH)
    @click.option('--finished-expire-after', help='Expire after (sec).',
                  type=int, default=FINISHED_EXPIRE_AFTER)
    @click.option('--finished-history-max-count',
                  help='Max finished history to keep.',
                  type=int, default=FINISHED_HISTORY_MAX_COUNT)
    @click.option('--no-lock', is_flag=True, default=False,
                  help='Run without lock.')
    def cleanup(interval,
                trace_service_events_max_count,
                trace_batch_size,
                trace_expire_after,
                trace_history_max_count,
                finished_batch_size,
                finished_expire_after,
                finished_history_max_count,
                no_lock):
        """Cleans up old traces."""
        def _cleanup():
            """Do cleanup."""
            # Runs forever: one full pruning pass per iteration, then sleep.
            while True:
                zk.prune_trace(
                    context.GLOBAL.zk.conn,
                    trace_service_events_max_count
                )
                zk.cleanup_trace(
                    context.GLOBAL.zk.conn,
                    trace_batch_size,
                    trace_expire_after
                )
                zk.cleanup_finished(
                    context.GLOBAL.zk.conn,
                    finished_batch_size,
                    finished_expire_after
                )
                zk.cleanup_trace_history(
                    context.GLOBAL.zk.conn,
                    trace_history_max_count
                )
                zk.cleanup_finished_history(
                    context.GLOBAL.zk.conn,
                    finished_history_max_count
                )
                _LOGGER.info('Finished cleanup, sleep %s sec', interval)
                time.sleep(interval)
        if no_lock:
            _cleanup()
        else:
            # Acquire the leader-election lock first so that only one
            # cleanup process runs at a time - TODO confirm cell-wide scope.
            lock = zkutils.make_lock(context.GLOBAL.zk.conn,
                                     z.path.election(__name__))
            _LOGGER.info('Waiting for leader lock.')
            with lock:
                _cleanup()
    # click has registered `cleanup` on the group; the local name is unused.
    del cleanup
    return trace
| {
"content_hash": "9e771a77a77057b5ca28503ad13289a7",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 76,
"avg_line_length": 31.519685039370078,
"alnum_prop": 0.5578316262802898,
"repo_name": "captiosus/treadmill",
"id": "b5590eb0c72cecb7edd699d505ec352a61e5b02a",
"size": "4003",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "treadmill/sproc/trace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "570"
},
{
"name": "Python",
"bytes": "2598791"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "58099"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
from numpy import nanmean, nansum
# Public API of this module: slow, pure-numpy fallback reductions.
__all__ = [
    "median",
    "nanmedian",
    "nansum",
    "nanmean",
    "nanvar",
    "nanstd",
    "nanmin",
    "nanmax",
    "nanargmin",
    "nanargmax",
    "ss",
    "anynan",
    "allnan",
]
def nanargmin(a, axis=None):
    """Pure-numpy nanargmin fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanargmin(a, axis=axis)
    return result
def nanargmax(a, axis=None):
    """Pure-numpy nanargmax fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanargmax(a, axis=axis)
    return result
def nanvar(a, axis=None, ddof=0):
    """Pure-numpy nanvar fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanvar(a, axis=axis, ddof=ddof)
    return result
def nanstd(a, axis=None, ddof=0):
    """Pure-numpy nanstd fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanstd(a, axis=axis, ddof=ddof)
    return result
def nanmin(a, axis=None):
    """Pure-numpy nanmin fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanmin(a, axis=axis)
    return result
def nanmax(a, axis=None):
    """Pure-numpy nanmax fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanmax(a, axis=axis)
    return result
def median(a, axis=None):
    """Pure-numpy median fallback for unaccelerated dtypes."""
    # Warnings are suppressed for consistency with the other fallbacks.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.median(a, axis=axis)
    return result
def nanmedian(a, axis=None):
    """Pure-numpy nanmedian fallback for unaccelerated dtypes."""
    # Silence any warnings numpy emits while skipping NaNs.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = np.nanmedian(a, axis=axis)
    return result
def ss(a, axis=None):
    """Pure-numpy sum-of-squares fallback for unaccelerated dtypes."""
    arr = np.asarray(a)
    # Square elementwise, then reduce along the requested axis.
    return (arr * arr).sum(axis)
def anynan(a, axis=None):
    """Pure-numpy check whether any element is NaN."""
    nan_mask = np.isnan(a)
    return nan_mask.any(axis)
def allnan(a, axis=None):
    """Pure-numpy check whether every element is NaN."""
    nan_mask = np.isnan(a)
    return nan_mask.all(axis)
| {
"content_hash": "feb578d54159c596bbb875f09e0a05d4",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 60,
"avg_line_length": 25.402173913043477,
"alnum_prop": 0.660676080445015,
"repo_name": "kwgoodman/bottleneck",
"id": "4743bdb43b7b2aba26aaef4aeb08994b17cd2ed4",
"size": "2337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bottleneck/slow/reduce.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2510"
},
{
"name": "C",
"bytes": "207203"
},
{
"name": "Dockerfile",
"bytes": "683"
},
{
"name": "Makefile",
"bytes": "2126"
},
{
"name": "Python",
"bytes": "203727"
},
{
"name": "Shell",
"bytes": "3736"
}
],
"symlink_target": ""
} |
import taxcalc
import dropq
import pandas as pd
import os
import json
from rq import Queue
from worker import conn
import time
# Shared RQ queue bound to the connection imported from the worker module.
q = Queue(connection=conn)
def test_func(personal_exemp, exemp_start, phase_out):
    """Enqueue do_work on the RQ queue and block until its result arrives.

    Polls once per second.  Tests `job.result is None` rather than
    truthiness: the original `while(not job.result)` would spin forever
    if the job legitimately returned a falsy value such as 0.
    """
    job = q.enqueue(do_work, personal_exemp, exemp_start, phase_out)
    time.sleep(1)
    while job.result is None:
        time.sleep(1)
    return job.result
def do_work(personal_exemp, exemp_start, phase_out):
    """Run the dropq models against the PUF data and return a toy result.

    The return value is `(personal_exemp - exemp_start) * phase_out`; the
    dropq model outputs are computed but discarded.
    """
    # Keeps the loaded data at module level - presumably for reuse across
    # calls; TODO confirm other readers of `tax_dta`.
    global tax_dta
    myvars = {}
    myvars['_rt4'] = [0.39]
    user_mods = json.dumps(myvars)
    # Parenthesized print works on both Python 2 (prints the expression)
    # and Python 3, unlike the original Python 2-only print statements.
    print("begin work")
    cur_path = os.path.abspath(os.path.dirname(__file__))
    tax_dta = pd.read_csv(os.path.join(cur_path, "./puf2.csv"))
    mY_dec, df_dec, mY_bin, df_bin = dropq.run_models(tax_dta, user_mods=user_mods)
    print("end work")
    results = (personal_exemp - exemp_start) * phase_out
    return results
| {
"content_hash": "a3dc99e7ebbc02ddc45bd1bd75becbfd",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 83,
"avg_line_length": 26.53125,
"alnum_prop": 0.6702002355712603,
"repo_name": "PeterDSteinberg/webapp-public",
"id": "ecb09832cfe18f708bab6a65865d5ce3937dd721",
"size": "849",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/test_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61933"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "381167"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
from abc import ABCMeta
#-----------------------------------------------------------------------------
# BaseTranslator interface
class BaseTranslator(object):
    """
    Abstract interface for converting between filepaths and
    :class:`drslib.drs.DRS` objects.

    Concrete implementations are obtained from factory functions such as
    :mod:`drslib.cmip5.make_translator`.

    :property prefix: The prefix for all DRS paths including the
        activity.  All paths are interpreted as relative to this
        prefix.  Generated paths have this prefix added.
    """
    # NOTE(review): __metaclass__ only takes effect on Python 2; on
    # Python 3 the class is a plain base class — confirm intended targets.
    __metaclass__ = ABCMeta

    def __init__(self, prefix=''):
        # Abstract: concrete translators supply their own constructor.
        raise NotImplementedError

    def filename_to_drs(self, filename):
        """Build a :class:`drslib.drs.DRS` object from *filename*.

        Only DRS components deducible from the filename are set.
        """
        raise NotImplementedError

    def path_to_drs(self, path):
        """Build a :class:`drslib.drs.DRS` object from a directory *path*.

        Only DRS components deducible from the path are set.
        """
        raise NotImplementedError

    def filepath_to_drs(self, filepath):
        """Build a :class:`drslib.drs.DRS` object from a full *filepath*."""
        raise NotImplementedError

    def drs_to_filepath(self, drs):
        """Return the full filepath corresponding to *drs*."""
        raise NotImplementedError

    def drs_to_path(self, drs):
        """Return the directory path corresponding to *drs*."""
        raise NotImplementedError

    def drs_to_file(self, drs):
        """Return the filename corresponding to *drs*."""
        raise NotImplementedError
| {
"content_hash": "12663a203ae04259b8558d27e587f4cf",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.586038961038961,
"repo_name": "ESGF/esgf-drslib",
"id": "e8026dbe70ad264697aa6dcb79aa995df5f04d4c",
"size": "2053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drslib/translate_iface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "LiveScript",
"bytes": "886674"
},
{
"name": "Python",
"bytes": "321057"
},
{
"name": "Shell",
"bytes": "9958"
}
],
"symlink_target": ""
} |
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.finetuning import BackboneFinetuning, BaseFinetuning
from pytorch_lightning.callbacks.gpu_stats_monitor import GPUStatsMonitor
from pytorch_lightning.callbacks.gradient_accumulation_scheduler import GradientAccumulationScheduler
from pytorch_lightning.callbacks.lambda_function import LambdaCallback
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.callbacks.progress import ProgressBar, ProgressBarBase, RichProgressBar
from pytorch_lightning.callbacks.pruning import ModelPruning
from pytorch_lightning.callbacks.quantization import QuantizationAwareTraining
from pytorch_lightning.callbacks.stochastic_weight_avg import StochasticWeightAveraging
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.callbacks.xla_stats_monitor import XLAStatsMonitor
# Public callback API.  Kept alphabetically sorted: the original list had
# drifted (XLAStatsMonitor wedged mid-list, RichProgressBar appended last),
# inviting further inconsistent additions.  `__all__` order is not part of
# the API contract, so sorting is backward-compatible.
__all__ = [
    "BackboneFinetuning",
    "BaseFinetuning",
    "BasePredictionWriter",
    "Callback",
    "EarlyStopping",
    "GPUStatsMonitor",
    "GradientAccumulationScheduler",
    "LambdaCallback",
    "LearningRateMonitor",
    "ModelCheckpoint",
    "ModelPruning",
    "ProgressBar",
    "ProgressBarBase",
    "QuantizationAwareTraining",
    "RichProgressBar",
    "StochasticWeightAveraging",
    "Timer",
    "XLAStatsMonitor",
]
| {
"content_hash": "d05f82adae9672a1c3d9c3fccf8a7758",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 101,
"avg_line_length": 43.75,
"alnum_prop": 0.8222222222222222,
"repo_name": "williamFalcon/pytorch-lightning",
"id": "d2c405b5c2d10073fcd55e3238ac1bebde7073f6",
"size": "2161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch_lightning/callbacks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "511511"
},
{
"name": "Shell",
"bytes": "1731"
}
],
"symlink_target": ""
} |
import argparse
import sys
from ros_buildfarm.argument import add_argument_arch
from ros_buildfarm.argument import add_argument_build_name
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import add_argument_dry_run
from ros_buildfarm.argument import add_argument_os_code_name
from ros_buildfarm.argument import add_argument_os_name
from ros_buildfarm.argument import add_argument_repository_name
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.devel_job import configure_devel_job
def main(argv=sys.argv[1:]):
    """Parse command-line arguments and configure a 'devel' Jenkins job."""
    parser = argparse.ArgumentParser(
        description="Generate a 'devel' job on Jenkins")
    # Register the shared ros_buildfarm CLI options on the parser.
    for register in (add_argument_config_url, add_argument_rosdistro_name):
        register(parser)
    add_argument_build_name(parser, 'source')
    for register in (add_argument_repository_name, add_argument_os_name,
                     add_argument_os_code_name, add_argument_arch,
                     add_argument_dry_run):
        register(parser)
    opts = parser.parse_args(argv)
    configure_devel_job(
        opts.config_url, opts.rosdistro_name, opts.source_build_name,
        opts.repository_name, opts.os_name, opts.os_code_name, opts.arch,
        dry_run=opts.dry_run)
# Allow running this module directly as a script.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "bca9ed14e665fe49a848b9622de35bec",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 35.111111111111114,
"alnum_prop": 0.745253164556962,
"repo_name": "ros-infrastructure/ros_buildfarm",
"id": "f39166129c4991e7a041da012b3a0a0e0798661a",
"size": "1871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ros_buildfarm/scripts/devel/generate_devel_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5328"
},
{
"name": "EmberScript",
"bytes": "352484"
},
{
"name": "Groovy",
"bytes": "1561"
},
{
"name": "JavaScript",
"bytes": "13229"
},
{
"name": "Python",
"bytes": "784731"
},
{
"name": "Shell",
"bytes": "10950"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.