_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q36900 | get_default_shell | train | def get_default_shell():
"""
return the path to the default shell for the current user.
"""
if is_windows():
return 'cmd.exe'
else:
import pwd
import getpass
if 'SHELL' in os.environ:
return os.environ['SHELL']
else:
username = getpass.getuser()
shell = pwd.getpwnam(username).pw_shell
return shell | python | {
"resource": ""
} |
q36901 | _confirm_or_prompt_or_command | train | def _confirm_or_prompt_or_command(pymux):
" True when we are waiting for a command, prompt or confirmation. "
client_state = pymux.get_client_state()
if client_state.confirm_text or client_state.prompt_command or client_state.command_mode:
return True | python | {
"resource": ""
} |
q36902 | BaseModel.mutate | train | def mutate(self):
'''
Mutate to next state
:return: True if mutated, False if not
'''
self._get_ready()
if self._is_last_index():
return False
self._current_index += 1
self._mutate()
return True | python | {
"resource": ""
} |
q36903 | KittyWebClientApi.get_stats | train | def get_stats(self):
'''
Get kitty stats as a dictionary
'''
resp = requests.get('%s/api/stats.json' % self.url)
assert(resp.status_code == 200)
return resp.json() | python | {
"resource": ""
} |
q36904 | BaseFuzzer._handle_options | train | def _handle_options(self, option_line):
'''
Handle options from command line, in docopt style.
This allows passing arguments to the fuzzer from the command line
without the need to re-write it in each runner.
:param option_line: string with the command line options to be parsed.
'''
if option_line is not None:
usage = '''
These are the options to the kitty fuzzer object, not the options to the runner.
Usage:
fuzzer [options] [-v ...]
Options:
-d --delay <delay> delay between tests in secodes, float number
-f --session <session-file> session file name to use
-n --no-env-test don't perform environment test before the fuzzing session
-r --retest <session-file> retest failed/error tests from a session file
-t --test-list <test-list> a comma delimited test list string of the form "-10,12,15-20,30-"
-v --verbose be more verbose in the log
Removed options:
end, start - use --test-list instead
'''
options = docopt.docopt(usage, shlex.split(option_line))
# ranges
if options['--retest']:
retest_file = options['--retest']
try:
test_list_str = self._get_test_list_from_session_file(retest_file)
except Exception as ex:
raise KittyException('Failed to open session file (%s) for retesting: %s' % (retest_file, ex))
else:
test_list_str = options['--test-list']
self._set_test_ranges(None, None, test_list_str)
# session file
session_file = options['--session']
if session_file is not None:
self.set_session_file(session_file)
# delay between tests
delay = options['--delay']
if delay is not None:
self.set_delay_between_tests(float(delay))
# environment test
skip_env_test = options['--no-env-test']
if skip_env_test:
self.set_skip_env_test(True)
# verbosity
verbosity = options['--verbose']
self.set_verbosity(verbosity) | python | {
"resource": ""
} |
q36905 | BaseFuzzer.set_model | train | def set_model(self, model):
'''
Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz
'''
self.model = model
if self.model:
self.model.set_notification_handler(self)
self.handle_stage_changed(model)
return self | python | {
"resource": ""
} |
q36906 | BaseFuzzer.set_range | train | def set_range(self, start_index=0, end_index=None):
'''
Set range of tests to run
.. deprecated::
use :func:`~kitty.fuzzers.base.BaseFuzzer.set_test_list`
:param start_index: index to start at (default=0)
:param end_index: index to end at(default=None)
'''
if end_index is not None:
end_index += 1
self._test_list = StartEndList(start_index, end_index)
self.session_info.start_index = start_index
self.session_info.current_index = 0
self.session_info.end_index = end_index
self.session_info.test_list_str = self._test_list.as_test_list_str()
return self | python | {
"resource": ""
} |
q36907 | BaseFuzzer.start | train | def start(self):
'''
Start the fuzzing session
If fuzzer already running, it will return immediatly
'''
if self._started:
self.logger.warning('called while fuzzer is running. ignoring.')
return
self._started = True
assert(self.model)
assert(self.user_interface)
assert(self.target)
if self._load_session():
self._check_session_validity()
self._set_test_ranges(
self.session_info.start_index,
self.session_info.end_index,
self.session_info.test_list_str
)
else:
self.session_info.kitty_version = _get_current_version()
# TODO: write hash for high level
self.session_info.data_model_hash = self.model.hash()
# if self.session_info.end_index is None:
# self.session_info.end_index = self.model.last_index()
if self._test_list is None:
self._test_list = StartEndList(0, self.model.num_mutations())
else:
self._test_list.set_last(self.model.last_index())
list_count = self._test_list.get_count()
self._test_list.skip(list_count - 1)
self.session_info.end_index = self._test_list.current()
self._test_list.reset()
self._store_session()
self._test_list.skip(self.session_info.current_index)
self.session_info.test_list_str = self._test_list.as_test_list_str()
self._set_signal_handler()
self.user_interface.set_data_provider(self.dataman)
self.user_interface.set_continue_event(self._continue_event)
self.user_interface.start()
self.session_info.start_time = time.time()
try:
self._start_message()
self.target.setup()
start_from = self.session_info.current_index
if self._skip_env_test:
self.logger.info('Skipping environment test')
else:
self.logger.info('Performing environment test')
self._test_environment()
self._in_environment_test = False
self._test_list.reset()
self._test_list.skip(start_from)
self.session_info.current_index = start_from
self.model.skip(self._test_list.current())
self._start()
return True
except Exception as e:
self.logger.error('Error occurred while fuzzing: %s', repr(e))
self.logger.error(traceback.format_exc())
return False | python | {
"resource": ""
} |
q36908 | BaseFuzzer.handle_stage_changed | train | def handle_stage_changed(self, model):
'''
handle a stage change in the data model
:param model: the data model that was changed
'''
stages = model.get_stages()
if self.dataman:
self.dataman.set('stages', stages) | python | {
"resource": ""
} |
q36909 | BaseFuzzer.stop | train | def stop(self):
'''
stop the fuzzing session
'''
assert(self.model)
assert(self.user_interface)
assert(self.target)
self.user_interface.stop()
self.target.teardown()
self.dataman.submit_task(None)
self._un_set_signal_handler() | python | {
"resource": ""
} |
q36910 | BaseFuzzer._keep_running | train | def _keep_running(self):
'''
Should we still fuzz??
'''
if self.config.max_failures:
if self.session_info.failure_count >= self.config.max_failures:
return False
return self._test_list.current() is not None | python | {
"resource": ""
} |
q36911 | KittyObject.set_verbosity | train | def set_verbosity(cls, verbosity):
'''
Set verbosity of logger
:param verbosity: verbosity level. currently, we only support 1 (logging.DEBUG)
'''
if verbosity > 0:
# currently, we only toggle between INFO, DEBUG
logger = KittyObject.get_logger()
levels = [logging.DEBUG]
verbosity = min(verbosity, len(levels)) - 1
logger.setLevel(levels[verbosity]) | python | {
"resource": ""
} |
q36912 | KittyObject.not_implemented | train | def not_implemented(self, func_name):
'''
log access to unimplemented method and raise error
:param func_name: name of unimplemented function.
:raise: NotImplementedError detailing the function the is not implemented.
'''
msg = '%s is not overridden by %s' % (func_name, type(self).__name__)
self.logger.error(msg)
raise NotImplementedError(msg) | python | {
"resource": ""
} |
q36913 | synced | train | def synced(func):
'''
Decorator for functions that should be called synchronously from another thread
:param func: function to call
'''
def wrapper(self, *args, **kwargs):
'''
Actual wrapper for the synchronous function
'''
task = DataManagerTask(func, *args, **kwargs)
self.submit_task(task)
return task.get_results()
return wrapper | python | {
"resource": ""
} |
q36914 | DataManagerTask.execute | train | def execute(self, dataman):
'''
run the task
:type dataman: :class:`~kitty.data.data_manager.DataManager`
:param dataman: the executing data manager
'''
self._event.clear()
try:
self._result = self._task(dataman, *self._args)
#
# We are going to re-throw this exception from get_results,
# so we are doing such a general eception handling at the point.
# however, we do want to print it here as well
#
except Exception as ex: # pylint: disable=W0703
self._exception = ex
KittyObject.get_logger().error(traceback.format_exc())
self._event.set() | python | {
"resource": ""
} |
q36915 | DataManager.open | train | def open(self):
'''
open the database
'''
self._connection = sqlite3.connect(self._dbname)
self._cursor = self._connection.cursor()
self._session_info = SessionInfoTable(self._connection, self._cursor)
self._reports = ReportsTable(self._connection, self._cursor) | python | {
"resource": ""
} |
q36916 | DataManager.set | train | def set(self, key, data):
'''
set arbitrary data by key in volatile memory
:param key: key of the data
:param data: data to be stored
'''
if isinstance(data, dict):
self._volatile_data[key] = {k: v for (k, v) in data.items()}
else:
self._volatile_data[key] = data | python | {
"resource": ""
} |
q36917 | Table.row_to_dict | train | def row_to_dict(self, row):
'''
translate a row of the current table to dictionary
:param row: a row of the current table (selected with \\*)
:return: dictionary of all fields
'''
res = {}
for i in range(len(self._fields)):
res[self._fields[i][0]] = row[i]
return res | python | {
"resource": ""
} |
q36918 | MainGUI.register_palette | train | def register_palette(self):
"""Converts pygmets style to urwid palatte"""
default = 'default'
palette = list(self.palette)
mapping = CONFIG['rgb_to_short']
for tok in self.style.styles.keys():
for t in tok.split()[::-1]:
st = self.style.styles[t]
if '#' in st:
break
if '#' not in st:
st = ''
st = st.split()
st.sort() # '#' comes before '[A-Za-z0-9]'
if len(st) == 0:
c = default
elif st[0].startswith('bg:'):
c = default
elif len(st[0]) == 7:
c = 'h' + rgb_to_short(st[0][1:], mapping)[0]
elif len(st[0]) == 4:
c = 'h' + rgb_to_short(st[0][1]*2 + st[0][2]*2 + st[0][3]*2, mapping)[0]
else:
c = default
a = urwid.AttrSpec(c, default, colors=256)
row = (tok, default, default, default, a.foreground, default)
palette.append(row)
self.loop.screen.register_palette(palette) | python | {
"resource": ""
} |
q36919 | Permutations.increment | train | def increment(self):
""" Increment the last permutation we returned to the next. """
# Increment position from the deepest place of the tree first.
for index in reversed(range(self.depth)):
self.indexes[index] += 1
# We haven't reached the end of board, no need to adjust upper
# level.
if self.indexes[index] < self.range_size:
break
# We've reached the end of board. Reset current level and increment
# the upper level.
self.indexes[index] = 0
# Now that we incremented our indexes, we need to deduplicate positions
# shering the same UIDs, by aligning piece's indexes to their parents.
# This works thanks to the sort performed on self.pieces
# initialization. See #7.
for i in range(self.depth - 1):
if (self.pieces[i] == self.pieces[i + 1]) and (
self.indexes[i] > self.indexes[i + 1]):
self.indexes[i + 1] = self.indexes[i] | python | {
"resource": ""
} |
q36920 | Permutations.skip_branch | train | def skip_branch(self, level):
""" Abandon the branch at the provided level and skip to the next.
When we call out to skip to the next branch of the search space, we
push sublevel pieces to the maximum positions of the board. So that the
next time the permutation iterator is called, it can produce the vector
state of the next adjacent branch. See #3.
"""
for i in range(level + 1, self.depth):
self.indexes[i] = self.range_size - 1 | python | {
"resource": ""
} |
q36921 | SolverContext.solve | train | def solve(self):
""" Solve all possible positions of pieces within the context.
Depth-first, tree-traversal of the product space.
"""
# Create a new, empty board.
board = Board(self.length, self.height)
# Iterate through all combinations of positions.
permutations = Permutations(self.pieces, self.vector_size)
for positions in permutations:
# Reuse board but flush all pieces.
board.reset()
for level, (piece_uid, linear_position) in enumerate(positions):
# Try to place the piece on the board.
try:
board.add(piece_uid, linear_position)
# If one of the piece can't be added, throw the whole set, skip
# the rotten branch and proceed to the next.
except (OccupiedPosition, VulnerablePosition, AttackablePiece):
permutations.skip_branch(level)
break
else:
# All pieces fits, save solution and proceeed to the next
# permutation.
self.result_counter += 1
yield board | python | {
"resource": ""
} |
q36922 | get_flagged_names | train | def get_flagged_names():
"""Return a list of all filenames marked as flagged."""
l = []
for w in _widget_cache.values():
if w.flagged:
l.append(w.get_node().get_value())
return l | python | {
"resource": ""
} |
q36923 | starts_expanded | train | def starts_expanded(name):
"""Return True if directory is a parent of initial cwd."""
if name is '/':
return True
l = name.split(dir_sep())
if len(l) > len(_initial_cwd):
return False
if l != _initial_cwd[:len(l)]:
return False
return True | python | {
"resource": ""
} |
q36924 | escape_filename_sh | train | def escape_filename_sh(name):
"""Return a hopefully safe shell-escaped version of a filename."""
# check whether we have unprintable characters
for ch in name:
if ord(ch) < 32:
# found one so use the ansi-c escaping
return escape_filename_sh_ansic(name)
# all printable characters, so return a double-quoted version
name.replace('\\','\\\\')
name.replace('"','\\"')
name.replace('`','\\`')
name.replace('$','\\$')
return '"'+name+'"' | python | {
"resource": ""
} |
q36925 | escape_filename_sh_ansic | train | def escape_filename_sh_ansic(name):
"""Return an ansi-c shell-escaped version of a filename."""
out =[]
# gather the escaped characters into a list
for ch in name:
if ord(ch) < 32:
out.append("\\x%02x"% ord(ch))
elif ch == '\\':
out.append('\\\\')
else:
out.append(ch)
# slap them back together in an ansi-c quote $'...'
return "$'" + "".join(out) + "'" | python | {
"resource": ""
} |
q36926 | FlagFileWidget.keypress | train | def keypress(self, size, key):
"""allow subclasses to intercept keystrokes"""
key = self.__super.keypress(size, key)
if key:
key = self.unhandled_keys(size, key)
return key | python | {
"resource": ""
} |
q36927 | FlagFileWidget.update_w | train | def update_w(self):
"""Update the attributes of self.widget based on self.flagged.
"""
if self.flagged:
self._w.attr = 'flagged'
self._w.focus_attr = 'flagged focus'
else:
self._w.attr = 'body'
self._w.focus_attr = 'focus' | python | {
"resource": ""
} |
q36928 | DirectoryNode.load_child_node | train | def load_child_node(self, key):
"""Return either a FileNode or DirectoryNode"""
index = self.get_child_index(key)
if key is None:
return EmptyNode(None)
else:
path = os.path.join(self.get_value(), key)
if index < self.dir_count:
return DirectoryNode(path, self.display, parent=self)
else:
path = os.path.join(self.get_value(), key)
return FileNode(path, self.display, parent=self) | python | {
"resource": ""
} |
q36929 | PasswordForm.clean_password | train | def clean_password(self):
"""
Validates that the password is a current password
"""
user_pass = self.cleaned_data.get('password')
matches = Password.objects.filter(password=user_pass)
if not matches:
raise forms.ValidationError("Your password does not match.") | python | {
"resource": ""
} |
q36930 | gfm | train | def gfm(text):
"""Processes Markdown according to GitHub Flavored Markdown spec."""
extractions = {}
def extract_pre_block(matchobj):
match = matchobj.group(0)
hashed_match = hashlib.md5(match.encode('utf-8')).hexdigest()
extractions[hashed_match] = match
result = "{gfm-extraction-%s}" % hashed_match
return result
def escape_underscore(matchobj):
match = matchobj.group(0)
if match.count('_') > 1:
return re.sub('_', '\_', match)
else:
return match
def newlines_to_brs(matchobj):
match = matchobj.group(0)
if re.search("\n{2}", match):
return match
else:
match = match.strip()
return match + " \n"
def insert_pre_block(matchobj):
string = "\n\n" + extractions[matchobj.group(1)]
return string
text = re.sub("(?s)<pre>.*?<\/pre>", extract_pre_block, text)
text = re.sub("(^(?! {4}|\t)\w+_\w+_\w[\w_]*)", escape_underscore, text)
text = re.sub("(?m)^[\w\<][^\n]*\n+", newlines_to_brs, text)
text = re.sub("\{gfm-extraction-([0-9a-f]{32})\}", insert_pre_block, text)
return text | python | {
"resource": ""
} |
q36931 | markdown | train | def markdown(text):
"""Processes GFM then converts it to HTML."""
text = gfm(text)
text = markdown_lib.markdown(text)
return text | python | {
"resource": ""
} |
q36932 | Command.add_log_options | train | def add_log_options(self, verbose_func=None, quiet_func=None):
"""
A helper for setting up log options
"""
if not verbose_func:
def verbose_func():
return log.config(verbose=True)
if not quiet_func:
def quiet_func():
return log.config(quiet=True)
self.option('-v, --verbose', 'show more logs', verbose_func)
self.option('-q, --quiet', 'show less logs', quiet_func)
return self | python | {
"resource": ""
} |
q36933 | BGP.redistribute | train | def redistribute(self, **kwargs):
"""Set BGP redistribute properties.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.redistribute(source='connected',
... rbridge_id='225')
... output = dev.bgp.redistribute(source='connected',
... rbridge_id='225', get=True)
... output = dev.bgp.redistribute(source='connected',
... rbridge_id='225', delete=True)
... dev.bgp.redistribute() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
... dev.bgp.redistribute(source='connected', rbridge_id='225',
... afi='hodor') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
... dev.bgp.redistribute(source='hodor', rbridge_id='225',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
# This method is the same as the base method except for one place.
# The class doesn't inherit from the base class, though, so we have
# to duplicate.
source = kwargs.pop('source')
afi = kwargs.pop('afi', 'ipv4')
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'),
afi=afi, source=source)
redistribute = self._redistribute_builder(afi=afi, source=source)
config = redistribute(**args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
if kwargs.pop('delete', False):
tag = 'redistribute-%s' % source
config.find('.//*%s' % tag).set('operation', 'delete')
return callback(config) | python | {
"resource": ""
} |
q36934 | BGP._redistribute_builder | train | def _redistribute_builder(self, afi='ipv4', source=None):
"""Build BGP redistribute method.
Do not use this method directly. You probably want ``redistribute``.
Args:
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
Returns:
Method to redistribute desired source.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._redistribute_builder(source='connected',
... afi='ipv4')
... dev.bgp._redistribute_builder(source='hodor',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
if source == 'connected':
return getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_af_{0}_uc_and_vrf_cmds_'
'call_point_holder_redistribute_connected_'
'redistribute_connected'.format(afi))
# TODO: Add support for 'static' and 'ospf'
else:
raise AttributeError('Invalid source.') | python | {
"resource": ""
} |
q36935 | BGP.max_paths | train | def max_paths(self, **kwargs):
"""Set BGP max paths property.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
paths (str): Number of paths for BGP ECMP (default: 8).
afi (str): Address family to configure. (ipv4, ipv6)
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
``AttributeError``: When `afi` is not one of ['ipv4', 'ipv6']
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.max_paths(paths='8',
... rbridge_id='225')
... output = dev.bgp.max_paths(paths='8',
... rbridge_id='225', get=True)
... output = dev.bgp.max_paths(paths='8',
... rbridge_id='225', delete=True)
... output = dev.bgp.max_paths(paths='8', afi='ipv6',
... rbridge_id='225')
... output = dev.bgp.max_paths(paths='8', afi='ipv6',
... rbridge_id='225', get=True)
... output = dev.bgp.max_paths(paths='8', afi='ipv6',
... rbridge_id='225', delete=True)
... output = dev.bgp.max_paths(paths='8', afi='ipv5',
... rbridge_id='225') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
afi = kwargs.pop('afi', 'ipv4')
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'),
load_sharing_value=kwargs.pop('paths', '8'))
max_paths = getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_af_common_cmds_holder_'
'maximum_paths_load_sharing_value'.format(afi))
config = max_paths(**args)
if kwargs.pop('get', False):
return callback(config, handler='get_config')
if kwargs.pop('delete', False):
tag = 'maximum-paths'
config.find('.//*%s' % tag).set('operation', 'delete')
return callback(config) | python | {
"resource": ""
} |
q36936 | BGP._multihop_xml | train | def _multihop_xml(self, **kwargs):
"""Build BGP multihop XML.
Do not use this method directly. You probably want ``multihop``.
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
neighbor (ipaddress.ip_interface): `ip_interface` object containing
peer IP address (IPv4 or IPv6).
count (str): Number of hops to allow. (1-255)
Returns:
``ElementTree``: XML for configuring BGP multihop.
Raises:
KeyError: if any arg is not specified.
Examples:
>>> import pynos.device
>>> from ipaddress import ip_interface
>>> conn = ('10.24.39.230', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... dev.bgp._multihop_xml(neighbor=ip_interface(unicode(
... '10.10.10.10')), count='5', vrf='default', rbridge_id='1')
... dev.bgp._multihop_xml(
... ip='10.10.10.10') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ip_addr = kwargs.pop('neighbor')
ip = str(ip_addr.ip)
rbr_ns = 'urn:brocade.com:mgmt:brocade-rbridge'
bgp_ns = 'urn:brocade.com:mgmt:brocade-bgp'
config = ET.Element('config')
ele = ET.SubElement(config, 'rbridge-id', xmlns=rbr_ns)
ET.SubElement(ele, 'rbridge-id').text = kwargs.pop('rbridge_id')
ele = ET.SubElement(ele, 'router')
ele = ET.SubElement(ele, 'router-bgp', xmlns=bgp_ns)
ele = ET.SubElement(ele, 'router-bgp-attributes')
ele = ET.SubElement(ele, 'neighbor')
if ip_addr.version == 4:
ele = ET.SubElement(ele, 'neighbor-ips')
ele = ET.SubElement(ele, 'neighbor-addr')
ET.SubElement(ele, 'router-bgp-neighbor-address').text = ip
else:
ele = ET.SubElement(ele, 'neighbor-ipv6s')
ele = ET.SubElement(ele, 'neighbor-ipv6-addr')
ET.SubElement(ele, 'router-bgp-neighbor-ipv6-address').text = ip
ele = ET.SubElement(ele, 'ebgp-multihop')
ET.SubElement(ele, 'ebgp-multihop-count').text = kwargs.pop('count')
return config | python | {
"resource": ""
} |
q36937 | return_xml | train | def return_xml(element_tree):
"""Return an XML Element.
Args:
element_tree (Element): XML Element to be returned. If sent as a
``str``, this function will attempt to convert it to an
``Element``.
Returns:
Element: An XML Element.
Raises:
TypeError: if `element_tree` is not of type ``Element`` and it
cannot be converted from a ``str``.
Examples:
>>> import pynos.utilities
>>> import xml.etree.ElementTree as ET
>>> ele = pynos.utilities.return_xml(ET.Element('config'))
>>> assert isinstance(ele, ET.Element)
>>> ele = pynos.utilities.return_xml('<config />')
>>> assert isinstance(ele, ET.Element)
>>> ele = pynos.utilities.return_xml(
... ['hodor']) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
TypeError
"""
if isinstance(element_tree, ET.Element):
return element_tree
try:
return ET.fromstring(element_tree)
except TypeError:
raise TypeError('{} takes either {} or {} type.'
.format(repr(return_xml.__name__),
repr(str.__name__),
repr(ET.Element.__name__))) | python | {
"resource": ""
} |
q36938 | valid_vlan_id | train | def valid_vlan_id(vlan_id, extended=True):
"""Validates a VLAN ID.
Args:
vlan_id (integer): VLAN ID to validate. If passed as ``str``, it will
be cast to ``int``.
extended (bool): If the VLAN ID range should be considered extended
for Virtual Fabrics.
Returns:
bool: ``True`` if it is a valid VLAN ID. ``False`` if not.
Raises:
None
Examples:
>>> import pynos.utilities
>>> vlan = '565'
>>> pynos.utilities.valid_vlan_id(vlan)
True
>>> extended = False
>>> vlan = '6789'
>>> pynos.utilities.valid_vlan_id(vlan, extended=extended)
False
>>> pynos.utilities.valid_vlan_id(vlan)
True
"""
minimum_vlan_id = 1
maximum_vlan_id = 4095
if extended:
maximum_vlan_id = 8191
return minimum_vlan_id <= int(vlan_id) <= maximum_vlan_id | python | {
"resource": ""
} |
q36939 | merge_xml | train | def merge_xml(first_doc, second_doc):
"""Merges two XML documents.
Args:
first_doc (str): First XML document. `second_doc` is merged into this
document.
second_doc (str): Second XML document. It is merged into the first.
Returns:
XML Document: The merged document.
Raises:
None
Example:
>>> import pynos.utilities
>>> import lxml
>>> import xml
>>> x = xml.etree.ElementTree.fromstring('<config />')
>>> y = lxml.etree.fromstring('<config><hello /></config>')
>>> x = pynos.utilities.merge_xml(x, y)
"""
# Adapted from:
# http://stackoverflow.com/questions/27258013/merge-two-xml-files-python
# Maps each elements tag to the element from the first document
if isinstance(first_doc, lxml.etree._Element):
first_doc = ET.fromstring(lxml.etree.tostring(first_doc))
if isinstance(second_doc, lxml.etree._Element):
second_doc = ET.fromstring(lxml.etree.tostring(second_doc))
mapping = {element.tag: element for element in first_doc}
for element in second_doc:
if not len(element):
# Recursed fully. This element has no children.
try:
# Update the first document's element's text
mapping[element.tag].text = element.text
except KeyError:
# The element doesn't exist
# add it to the mapping and the root document
mapping[element.tag] = element
first_doc.append(element)
else:
# This element has children. Recurse.
try:
merge_xml(mapping[element.tag], element)
except KeyError:
# The element doesn't exist
# add it to the mapping and the root document
mapping[element.tag] = element
first_doc.append(element)
return lxml.etree.fromstring(ET.tostring(first_doc)) | python | {
"resource": ""
} |
q36940 | FileSystemEvents.get_scss_files | train | def get_scss_files(self, skip_partials=True, with_source_path=False):
"""Gets all SCSS files in the source directory.
:param bool skip_partials: If True, partials will be ignored. Otherwise,
all SCSS files, including ones that begin
with '_' will be returned.
:param boom with_source_path: If true, the `source_path` will be added
to all of the paths. Otherwise, it will
be stripped.
:returns: A list of the SCSS files in the source directory
"""
scss_files = []
for root, dirs, files in os.walk(self._source_path):
for filename in fnmatch.filter(files, "*.scss"):
if filename.startswith("_") and skip_partials:
continue
full_path = os.path.join(root, filename)
if not with_source_path:
full_path = full_path.split(self._source_path)[1]
if full_path.startswith("/"):
full_path = full_path[1:]
scss_files.append(full_path)
return scss_files | python | {
"resource": ""
} |
q36941 | DatasetPostgreSQLIndex._index_document | train | def _index_document(self, document, force=False):
""" Adds dataset document to the index. """
query = text("""
INSERT INTO dataset_index(vid, title, keywords, doc)
VALUES(:vid, :title, string_to_array(:keywords, ' '), to_tsvector('english', :doc));
""")
self.execute(query, **document) | python | {
"resource": ""
} |
q36942 | PartitionPostgreSQLIndex.is_indexed | train | def is_indexed(self, partition):
""" Returns True if partition is already indexed. Otherwise returns False. """
query = text("""
SELECT vid
FROM partition_index
WHERE vid = :vid;
""")
result = self.execute(query, vid=partition.vid)
return bool(result.fetchall()) | python | {
"resource": ""
} |
q36943 | IdentifierPostgreSQLIndex.search | train | def search(self, search_phrase, limit=None):
""" Finds identifiers by search phrase.
Args:
search_phrase (str or unicode):
limit (int, optional): how many results to return. None means without limit.
Returns:
list of IdentifierSearchResult instances.
"""
query_parts = [
'SELECT identifier, type, name, similarity(name, :word) AS sml',
'FROM identifier_index',
'WHERE name % :word',
'ORDER BY sml DESC, name']
query_params = {
'word': search_phrase}
if limit:
query_parts.append('LIMIT :limit')
query_params['limit'] = limit
query_parts.append(';')
query = text('\n'.join(query_parts))
self.backend.library.database.set_connection_search_path()
results = self.execute(query, **query_params).fetchall()
for result in results:
vid, type, name, score = result
yield IdentifierSearchResult(
score=score, vid=vid,
type=type, name=name) | python | {
"resource": ""
} |
q36944 | pare | train | def pare(text, size, etc='...'):
'''Pare text to have maximum size and add etc to the end if it's
changed'''
size = int(size)
text = text.strip()
if len(text)>size:
# strip the last word or not
to_be_stripped = not whitespace_re.findall(text[size-1:size+2])
text = text[:size]
if to_be_stripped:
half = size//2
last = None
for mo in whitespace_re.finditer(text[half:]):
last = mo
if last is not None:
text = text[:half+last.start()+1]
return text.rstrip() + etc
else:
return text | python | {
"resource": ""
} |
q36945 | get_environment | train | def get_environment(id=None, name=None):
"""
Get a specific Environment by name or ID
"""
data = get_environment_raw(id, name)
if data:
return utils.format_json(data) | python | {
"resource": ""
} |
q36946 | list_environments_raw | train | def list_environments_raw(page_size=200, page_index=0, sort="", q=""):
"""
List all Environments
"""
response = utils.checked_api_call(pnc_api.environments, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content | python | {
"resource": ""
} |
def primary_dimensions(self):
    """Yield the dimension columns that have no parent column."""
    from ambry.valuetype.core import ROLE
    for column in self.columns:
        if column.parent or column.role != ROLE.DIMENSION:
            continue
        yield column
"resource": ""
} |
def primary_measures(self):
    """Yield the measure columns that have no parent column."""
    from ambry.valuetype.core import ROLE
    for column in self.columns:
        if column.parent or column.role != ROLE.MEASURE:
            continue
        yield column
"resource": ""
} |
def is_empty(self):
    """True when the table has no columns, or only the single 'id' column."""
    columns = self.columns
    if not columns:
        return True
    return len(columns) == 1 and columns[0].name == 'id'
"resource": ""
} |
def update_from_stats(self, stats):
    """Grow column sizes and set level-of-measure from partition statistics."""
    stats_by_column = dict(stats)
    for column in self.columns:
        if column not in stats_by_column:
            continue
        stat = stats_by_column[column]
        # Only grow the size; never shrink it.
        if stat.size and stat.size > column.size:
            column.size = stat.size
        column.lom = stat.lom
"resource": ""
} |
def transforms(self):
    """Return the expanded column transforms, zipped into per-segment tuples.

    Each yielded tuple holds one entry per column for a given transform
    segment; shorter columns are padded with None by zip_longest.
    """
    expanded = [column.expanded_transform for column in self.columns]
    return six.moves.zip_longest(*expanded)
"resource": ""
} |
def before_insert(mapper, conn, target):
    """SQLAlchemy before-insert hook: require a sequence_id, then delegate
    to before_update for the name/id bookkeeping."""
    if target.sequence_id is None:
        from ambry.orm.exc import DatabaseError
        raise DatabaseError('Must have sequence id before insertion')
    Table.before_update(mapper, conn, target)
"resource": ""
} |
def before_update(mapper, conn, target):
    """SQLAlchemy before-update hook: mangle the table name and refresh its id."""
    # Mangle first, matching the historical behavior even on the error path.
    target.name = Table.mangle_name(target.name)
    if isinstance(target, Column):
        raise TypeError('Got a column instead of a table')
    target.update_id(target.sequence_id, False)
"resource": ""
} |
def wait_for_tasks(self, raise_if_error=True):
    """
    Wait for the running tasks launched from the sessions.

    Note that it also waits for tasks that are started from other tasks
    callbacks, like on_finished.

    :param raise_if_error: if True, raise all possible encountered
        errors using :class:`TaskErrors`. Else the errors are returned
        as a list.
    """
    errors = []
    tasks_seen = TaskCache()
    while True:
        # Drain every session without raising, so one failing session
        # does not prevent the others from being waited on.
        for session in self.values():
            errs = session.wait_for_tasks(raise_if_error=False)
            errors.extend(errs)
        # look for tasks created after the wait (in callbacks of
        # tasks from different sessions)
        tasks = []
        for session in self.values():
            tasks.extend(session.tasks())
        # if none, then just break - else loop to wait for them
        if not any(t for t in tasks if t not in tasks_seen):
            break
    if raise_if_error and errors:
        raise TaskErrors(errors)
    return errors
"resource": ""
} |
def error(self):
    """
    Return an exception instance describing the failure, or None.

    A timeout yields a :class:`TimeoutError`; an exit code differing from
    the expected one yields an :class:`ExitCodeError`.
    """
    if self.__timed_out:
        return TimeoutError(self.session, self, "timeout")
    exit_code = self.__exit_code
    expected = self.__expected_exit_code
    if exit_code is not None and expected is not None and exit_code != expected:
        return ExitCodeError(self.session, self,
                             'bad exit code: Got %s' % exit_code)
    return None
"resource": ""
} |
def incver(self):
    """Return a copy of this dataset with revision incremented by one.

    Version-derived fields (vid, vname, fqname, version, cache_key) are
    omitted so the constructor can recompute them.
    """
    skip = ('vid', 'vname', 'fqname', 'version', 'cache_key')
    values = {}
    for prop in self.__mapper__.attrs:
        key = prop.key
        if key in skip:
            continue
        values[key] = self.revision + 1 if key == 'revision' else getattr(self, key)
    return Dataset(**values)
"resource": ""
} |
def new_unique_object(self, table_class, sequence_id=None, force_query=False, **kwargs):
    """Use next_sequence_id to create a new child of the dataset with a unique id.

    :param table_class: ORM class of the child object to create.
    :param sequence_id: if given, the caller guarantees there is no conflict
        potential, so no commit is performed here.
    :param force_query: passed to next_sequence_id to force a database query.
    :return: the new child object, or None when a concurrent insert wins the
        race (Postgres only).
    """
    from sqlalchemy.exc import IntegrityError
    from sqlalchemy.orm.exc import FlushError

    # If a sequence ID was specified, the caller is certain
    # that there is no potential for conflicts,
    # so there is no need to commit here.
    if not sequence_id:
        commit = True
        sequence_id = self.next_sequence_id(table_class, force_query=force_query)
    else:
        commit = False

    o = table_class(
        d_vid=self.vid,
        **kwargs
    )
    o.update_id(sequence_id)

    if commit is False:
        return o

    self.commit()

    if self._database.driver == 'sqlite':
        # Sqlite serializes writers, so there is no concurrency problem.
        self.session.add(o)
        self.commit()
        return o

    # Postgres: a concurrent insert may claim our sequence id; report the
    # conflict to the caller instead of crashing.
    try:
        self.session.add(o)
        self.commit()
        return o
    except (IntegrityError, FlushError):
        self.rollback()
        self.session.merge(self)
        # Fixed: this was a Python 2 print statement ("print 'Failed'"),
        # a syntax error under Python 3 while the rest of the function
        # already used the print() function.
        print('Failed')
        return None
    # NOTE: the original contained an unreachable retry loop after this
    # point (both branches above always return); it has been removed.
"resource": ""
} |
def new_table(self, name, add_id=True, **kwargs):
    '''Add a table to the schema, or update it if it already exists.

    If updating, will only update data.
    '''
    from . import Table
    from .exc import NotFoundError

    # Determine whether the table already exists; NotFoundError means it is new.
    try:
        table = self.table(name)
        extant = True
    except NotFoundError:
        extant = False
        if 'sequence_id' not in kwargs:
            kwargs['sequence_id'] = self._database.next_sequence_id(Dataset, self.vid, Table)
        table = Table(name=name, d_vid=self.vid, **kwargs)
        table.update_id()

    # Update possibly extant data
    table.data = dict(
        (list(table.data.items()) if table.data else []) + list(kwargs.get('data', {}).items()))

    # Copy the remaining keyword arguments onto the table, skipping private
    # names and identity-related attributes.
    for key, value in list(kwargs.items()):
        if not key:
            continue
        if key[0] != '_' and key not in ['vid', 'id', 'id_', 'd_id', 'name', 'sequence_id', 'table', 'column', 'data']:
            setattr(table, key, value)

    if add_id:
        table.add_id_column()

    if not extant:
        self.tables.append(table)

    return table
"resource": ""
} |
def new_partition(self, table, **kwargs):
    """ Creates new partition and returns it.

    Args:
        table (orm.Table): table (or its name) the partition belongs to.

    Returns:
        orm.Partition
    """
    from . import Partition

    if isinstance(table, string_types):
        table = self.table(table)

    # Use a caller-supplied sequence id when present, otherwise allocate one.
    if 'sequence_id' in kwargs:
        sequence_id = kwargs.pop('sequence_id')
    else:
        sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition)

    partition = Partition(
        t_vid=table.vid,
        table_name=table.name,
        sequence_id=sequence_id,
        dataset=self,
        d_vid=self.vid,
        **kwargs)
    partition.update_id()
    return partition
"resource": ""
} |
def partition(self, ref=None, **kwargs):
    """ Returns partition by ref. """
    from .exc import NotFoundError
    from six import text_type
    if ref:
        # Linear scan; slow for large datasets, like Census years.
        for p in self.partitions:
            # Match the ref against the partition's name, id or vid.
            if (text_type(ref) == text_type(p.name) or text_type(ref) == text_type(p.id) or
                    text_type(ref) == text_type(p.vid)):
                return p
        raise NotFoundError("Failed to find partition for ref '{}' in dataset '{}'".format(ref, self.name))
    elif kwargs:
        from ..identity import PartitionNameQuery
        pnq = PartitionNameQuery(**kwargs)
        # NOTE(review): this line appears truncated in the source; presumably
        # it should call a _find_orm* helper with pnq -- confirm upstream.
        return self._find_orm
"resource": ""
} |
def bsfile(self, path):
    """Return the build-source File record for *path*.

    Raises NotFoundError when no matching file record exists.
    """
    from sqlalchemy.orm.exc import NoResultFound
    from ambry.orm.exc import NotFoundError

    query = (object_session(self)
             .query(File)
             .filter(File.d_vid == self.vid)
             .filter(File.major_type == File.MAJOR_TYPE.BUILDSOURCE)
             .filter(File.path == path))
    try:
        return query.one()
    except NoResultFound:
        raise NotFoundError("Failed to find file for path '{}' ".format(path))
"resource": ""
} |
def row(self, fields):
    """Return a list of values for *fields* (for CSV files, pretty printing,
    etc.), with None for any field not present in the dataset dict."""
    data = self.dict
    return [data[field] if field in data else None for field in fields]
"resource": ""
} |
def metadata(self):
    """Return this dataset's configuration as a populated Top metadata tree."""
    from ambry.metadata.schema import Top  # cross-module import
    tree = Top()
    tree.build_from_db(self.dataset)
    return tree
"resource": ""
} |
def rows(self):
    """Return all config rows for the dataset, in a form usable to
    reconstitute a Metadata object.

    This is distinct from get_config_value, which returns the value
    for the library.
    """
    from ambry.orm import Config as SAConfig
    from sqlalchemy import or_

    configs = self.dataset.session \
        .query(SAConfig) \
        .filter(or_(SAConfig.group == 'config', SAConfig.group == 'process'),
                SAConfig.d_vid == self.dataset.vid) \
        .all()

    result = []
    for config in configs:
        parts = config.key.split('.', 3)
        # 'process' group keys are re-rooted under a 'process' prefix.
        if config.group == 'process':
            parts = ['process'] + parts
        padded = (
            parts[0] if len(parts) > 0 else None,
            parts[1] if len(parts) > 1 else None,
            parts[2] if len(parts) > 2 else None,
        )
        result.append((padded, config.value))
    return result
"resource": ""
} |
def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance):
    """ Finds appropriate term in the prop_tree and sets its value from config_instance.

    Args:
        instance_to_path_map (dict): key is a Config instance, value is its path in the tree.
        path_to_instance_map (dict): reverse mapping (unused here).
        prop_tree (PropertyDictTree): property tree to populate.
        config_instance (Config): config whose value is written into the tree.
    """
    path = instance_to_path_map[config_instance]

    # find group: walk down the tree along all path elements but the last.
    group = prop_tree
    for elem in path[:-1]:
        group = getattr(group, elem)
    assert group._key == config_instance.parent.key

    setattr(group, config_instance.key, config_instance.value)

    #
    # bind config to the term
    #
    # FIXME: Make all the terms to store config instance the same way.

    term = getattr(group, config_instance.key)
    try:
        if hasattr(term, '_term'):
            # ScalarTermS and ScalarTermU case
            term._term._config = config_instance
            return
    except KeyError:
        # python3 case. TODO: Find the way to make it simple.
        pass
    try:
        if hasattr(term, '_config'):
            term._config = config_instance
            return
    except KeyError:
        # python3 case. TODO: Find the way to make it simple.
        pass
    else:
        pass
"resource": ""
} |
def get_or_create(session, model, **kwargs):
    """ Get or create an sqlalchemy instance.

    Args:
        session (Sqlalchemy session):
        model (sqlalchemy model):
        kwargs (dict): kwargs to look up or create the instance with.

    Returns:
        Tuple: (instance, created) where created is True when a new row
        was inserted, False when an existing one was found.
    """
    existing = session.query(model).filter_by(**kwargs).first()
    if existing:
        return existing, False
    instance = model(**kwargs)
    if 'dataset' in kwargs:
        instance.update_sequence_id(session, kwargs['dataset'])
    session.add(instance)
    session.commit()
    return instance, True
"resource": ""
} |
def _get_config_instance(group_or_term, session, **kwargs):
    """ Return (config, created) for a group or term, using the tree's
    path cache before hitting the database.

    Args:
        group_or_term (Group or Term):
        session (Sqlalchemy session):
        kwargs (dict): kwargs passed through to get_or_create.
    """
    path = group_or_term._get_path()
    cached = group_or_term._top._cached_configs.get(path)
    if cached:
        return cached, False
    # Not cached (or does not exist yet): look up or create in the database.
    return get_or_create(session, Config, **kwargs)
"resource": ""
} |
def register_members(self):
    """Collect the Group-typed class attributes and convert them into
    object members.

    Unlike Terms, the Group class members are converted into object
    members, so the configuration data lives on the instance.
    """
    self._members = {}
    for name, attr in iteritems(type(self).__dict__):
        if isinstance(attr, Group):
            self._members[name] = attr
    for name, member in iteritems(self._members):
        member.init_descriptor(name, self)
"resource": ""
} |
def add_error(self, group, term, sub_term, value):
    """Record a value for an undeclared term under its (group, term, sub_term) key."""
    key = (group, term, sub_term)
    self._errors[key] = value
"resource": ""
} |
def _jinja_sub(self, st):
    """Render *st* as a Jinja template against the tree's values, up to 5
    recursive passes.

    Non-string inputs and rendering failures are returned unchanged.
    """
    if not isinstance(st, string_types):
        return st
    from jinja2 import Template
    try:
        # Substituted values may themselves contain templates, so iterate
        # a bounded number of times.
        for _ in range(5):
            st = Template(st).render(**(self._top.dict))
            if '{{' not in st:
                break
        return st
    except Exception:
        # Best effort: on any template error return the text unchanged.
        return st
"resource": ""
} |
def scalar_term(self, st):
    """Wrap a string in a _ScalarTermS/_ScalarTermU so text and HTML
    substitutions can be performed on it; pass other values through."""
    if isinstance(st, binary_type):
        return _ScalarTermS(st, self._jinja_sub)
    if isinstance(st, text_type):
        return _ScalarTermU(st, self._jinja_sub)
    if st is None:
        return _ScalarTermU(u(''), self._jinja_sub)
    return st
"resource": ""
} |
def update_config(self):
    """ Updates or creates config of that group. Requires tree bound to db. """
    dataset = self._top._config.dataset
    session = object_session(self._top._config)
    logger.debug(
        'Updating group config. dataset: {}, type: {}, key: {}'.format(dataset.vid, self._top._type, self._key))
    # A group uses its own key as both the Config row's group and key.
    self._config, created = _get_config_instance(
        self, session,
        parent_id=self._parent._config.id, d_vid=dataset.vid,
        group=self._key, key=self._key, type=self._top._type, dataset = dataset)
    if created:
        self._top._cached_configs[self._get_path()] = self._config
    # Mark the config as valid -- presumably so stale rows can be pruned
    # later; confirm against _add_valid's implementation.
    self._top._add_valid(self._config)
    if created:
        logger.debug(
            'New group config created and linked. config: {}'.format(self._config))
    else:
        logger.debug(
            'Existing group config linked. config: {}'.format(self._config))
"resource": ""
} |
def get_group_instance(self, parent):
    """Return a shallow copy of this group, initialized as an instance
    bound to *parent*."""
    instance = copy.copy(self)
    instance.init_instance(parent)
    return instance
"resource": ""
} |
def update_config(self, key, value):
    """ Creates or updates db config of the VarDictGroup. Requires bound to db tree. """
    dataset = self._top._config.dataset
    session = object_session(self._top._config)
    logger.debug(
        'Updating VarDictGroup config. dataset: {}, type: {}, key: {}, value: {}'.format(
            dataset, self._top._type, key, value))
    # The parent group must have a backing Config row first.
    if not self._parent._config:
        self._parent.update_config()
    # create or update group config
    self._config, created = get_or_create(
        session, Config,
        d_vid=dataset.vid, type=self._top._type,
        parent=self._parent._config, group=self._key,
        key=self._key, dataset=dataset)
    self._top._add_valid(self._config)
    # create or update value config
    config, created = get_or_create(
        session, Config, parent=self._config, d_vid=dataset.vid,
        type=self._top._type, key=key, dataset=dataset)
    if config.value != value:
        # sync db value with term value.
        config.value = value
        session.merge(config)
        session.commit()
    logger.debug(
        'Config bound to the VarDictGroup key updated. config: {}'.format(config))
    self._top._add_valid(config)
"resource": ""
} |
def update_config(self):
    """ Creates or updates db config of the term. Requires bound to db tree. """
    dataset = self._top._config.dataset
    session = object_session(self._top._config)
    #logger.debug('Updating term config. dataset: {}, type: {}, key: {}, value: {}'.format(
    #    dataset, self._top._type, self._key, self.get()))
    # The parent group must exist in the db before the term can reference it.
    if not self._parent._config:
        self._parent.update_config()
    self._config, created = _get_config_instance(
        self, session,
        parent=self._parent._config, d_vid=dataset.vid,
        type=self._top._type, key=self._key, dataset=dataset)
    if created:
        self._top._cached_configs[self._get_path()] = self._config
    # We update ScalarTerm and ListTerm values only. Composite terms (DictTerm for example)
    # should not contain value.
    if isinstance(self, (ScalarTerm, ListTerm)):
        if self._config.value != self.get():
            self._config.value = self.get()
            session.merge(self._config)
            session.commit()
    self._top._add_valid(self._config)
"resource": ""
} |
def text(self):
    """Render the scalar's HTML and return it with all markup stripped."""
    stripper = MLStripper()
    stripper.feed(self.html)
    return stripper.get_data()
"resource": ""
} |
def quoteattrs(data):
    '''Render a dict of attributes as a space-separated HTML attribute string.'''
    return ' '.join(
        '{}={}'.format(name, quoteattr(value))
        for name, value in data.items())
"resource": ""
} |
def quote_js(text):
    '''Escape *text* for use as a JavaScript string inside HTML templates.
    The result does not include surrounding quotes.'''
    if isinstance(text, six.binary_type):
        text = text.decode('utf-8')  # for Jinja2 Markup
    text = text.replace('\\', '\\\\')
    text = text.replace('\n', '\\n')
    text = text.replace('\r', '')
    # Hex-escape characters that could break out of the HTML context.
    for dangerous in '\'"<>&':
        text = text.replace(dangerous, '\\x{:02x}'.format(ord(dangerous)))
    return text
"resource": ""
} |
def create_ramp_plan(err, ramp):
    """
    Formulate and execute on a plan to slowly add heat or cooling to the system

    `err` initial error (PV - SP)
    `ramp` the size of the ramp

    A ramp plan might yield MVs in this order at every timestep:

        [5, 0, 4, 0, 3, 0, 2, 0, 1]

    where err == 5 + 4 + 3 + 2 + 1
    """
    if ramp == 1:  # basecase
        yield int(err)
        while True:
            yield 0
    # np.arange(n).sum() == err
    # --> solve for n
    # err = (n - 1) * (n // 2) == .5 * n**2 - .5 * n
    # 0 = n**2 - n --> solve for n
    # NOTE(review): the polynomial below drops `err` entirely, so n is
    # always 1 -- presumably it should be np.roots([.5, -.5, -err]);
    # confirm against the derivation in the comments above.
    n = np.abs(np.roots([.5, -.5, 0]).max())
    niter = int(ramp // (2 * n))  # 2 means add all MV in first half of ramp
    MV = n
    log.info('Initializing a ramp plan', extra=dict(
        ramp_size=ramp, err=err, niter=niter))
    for x in range(int(n)):
        budget = MV
        for x in range(niter):
            budget -= MV // niter
            yield int(np.sign(err) * (MV // niter))
        # Yield whatever remains of this step's budget in one go.
        yield int(budget * np.sign(err))
        MV -= 1
    while True:
        yield 0
"resource": ""
} |
def clean_value(self):
    '''
    Current field's converted value from form's python_data.
    '''
    # XXX cached_property is used only for set initial state
    # this property should be set every time field data
    # has been changed, for instance, in accept method
    data = self.parent.python_data
    if self.name not in data:
        return self.get_initial()
    return data[self.name]
"resource": ""
} |
def accept(self):
    '''Extract the raw value from the form's raw data and pass it through
    the converter; store and return the cleaned result.'''
    raw = self.raw_value
    if not self._check_value_type(raw):
        # XXX should this be silent or TypeError?
        raw = [] if self.multiple else self._null_value
    cleaned = self.conv.accept(raw)
    self.clean_value = cleaned
    return {self.name: cleaned}
"resource": ""
} |
def python_data(self):
    '''Representation of the aggregate value as a dictionary.'''
    try:
        current = self.clean_value
    except LookupError:
        # XXX is this necessary? Falls back to the initial value when no
        # clean value has been produced yet.
        current = self.get_initial()
    return self.from_python(current)
"resource": ""
} |
def accept(self):
    '''
    Accept all child fields, merge their results into a single dict, and
    pass that dict through this field set's converter. The converted
    result is returned under this field's name for the parent python_data.
    '''
    result = dict(self.python_data)
    for field in self.fields:
        if field.writable:
            result.update(field.accept())
        else:
            # Read-only field: push its current value back into raw_data.
            field.set_raw_value(self.form.raw_data,
                                field.from_python(result[field.name]))
    self.clean_value = self.conv.accept(result)
    return {self.name: self.clean_value}
"resource": ""
} |
def accept(self):
    '''
    Acts like `Field.accept`, but returns every child field's result
    directly for merging into the parent `python_data`.
    '''
    accepted = FieldSet.accept(self)
    self.clean_value = accepted[self.name]
    return self.clean_value
"resource": ""
} |
def getTicker(pair, connection=None, info=None):
    """Retrieve the ticker for the given pair. Returns a Ticker instance,
    or None when the API reports an error for the pair.

    :param pair: currency pair, e.g. "btc_usd"
    :param connection: optional BTCEConnection to reuse
    :param info: optional API info object used to validate the pair
    """
    if info is not None:
        info.validate_pair(pair)
    if connection is None:
        connection = common.BTCEConnection()
    response = connection.makeJSONRequest("/api/3/ticker/%s" % pair)
    # isinstance instead of a strict type() comparison (PEP 8 idiom).
    if not isinstance(response, dict):
        raise TypeError("The response is a %r, not a dict." % type(response))
    if u'error' in response:
        print("There is a error \"%s\" while obtaining ticker %s" % (response['error'], pair))
        return None
    return Ticker(**response[pair])
"resource": ""
} |
def getTradeHistory(pair, connection=None, info=None, count=None):
    """Retrieve the trade history for the given pair. Returns a list of
    Trade instances. If count is not None, it should be an integer, and
    specifies the number of items from the trade history that will be
    processed and returned.

    :param pair: currency pair, e.g. "btc_usd"
    :param connection: optional BTCEConnection to reuse
    :param info: optional API info object used to validate the pair
    :param count: optional limit on the number of trades returned
    """
    if info is not None:
        info.validate_pair(pair)
    if connection is None:
        connection = common.BTCEConnection()
    response = connection.makeJSONRequest("/api/3/trades/%s" % pair)
    # isinstance instead of strict type() comparisons (PEP 8 idiom),
    # consistent with getTicker.
    if not isinstance(response, dict):
        raise TypeError("The response is not a dict.")
    history = response.get(pair)
    if not isinstance(history, list):
        raise TypeError("The response is a %r, not a list." % type(history))
    # Limit the number of items returned if requested.
    if count is not None:
        history = history[:count]
    result = []
    for entry in history:
        entry["pair"] = pair
        result.append(Trade(**entry))
    return result
"resource": ""
} |
def remove(self):
    """ Removes the file from the filesystem, ignoring a missing file. """
    from fs.errors import ResourceNotFoundError
    try:
        self._fs.remove(self.file_name)
    except ResourceNotFoundError:
        # Already gone; nothing to do.
        pass
"resource": ""
} |
def sync(self, force=None):
    """Synchronize between the file in the file system and the field record.

    :param force: optional SYNC_DIR value overriding the computed direction.
    :return: the direction synced, or None when nothing was done.
    """
    try:
        if force:
            sd = force
        else:
            sd = self.sync_dir()

        if sd == self.SYNC_DIR.FILE_TO_RECORD:
            # A forced file->record sync for a missing file is a no-op.
            if force and not self.exists():
                return None
            self.fs_to_record()
        elif sd == self.SYNC_DIR.RECORD_TO_FILE:
            self.record_to_fs()
        else:
            return None

        # Record the time of this sync for the file/direction pair.
        self._dataset.config.sync[self.file_const][sd] = time.time()
        return sd
    except Exception as e:
        # Roll back any partial record changes, report, and re-raise.
        self._bundle.rollback()
        self._bundle.error("Failed to sync '{}': {}".format(self.file_const, e))
        raise
"resource": ""
} |
def record_to_fh(self, f):
    """Write the record, in filesystem format (YAML), to a file handle or
    file object, then refresh the record's hash and modification time."""
    record = self.record
    if not record.contents:
        return
    yaml.safe_dump(record.unpacked_contents, f,
                   default_flow_style=False, encoding='utf-8')
    record.source_hash = self.fs_hash
    record.modified = self.fs_modtime
"resource": ""
} |
def objects_to_record(self):
    """Copy selected object metadata into the record. Note that not
    everything is written -- only about, identity, names and contacts."""
    meta = self.get_object()
    meta.about = self._bundle.metadata.about
    meta.identity = self._dataset.identity.ident_dict
    meta.names = self._dataset.identity.names_dict
    meta.contacts = self._bundle.metadata.contacts
    self.set_object(meta)
"resource": ""
} |
def update_identity(self):
    """Refresh the record's identity and names from the dataset id/version."""
    record = self.record
    contents = record.unpacked_contents
    contents['identity'] = self._dataset.identity.ident_dict
    contents['names'] = self._dataset.identity.names_dict
    record.update_contents(msgpack.packb(contents), 'application/msgpack')
"resource": ""
} |
def get_object(self):
    """Return the record contents in object form, as an AttrDict, falling
    back to the default YAML when the record is empty."""
    from ..util import AttrDict
    contents = self.record.unpacked_contents
    if not contents:
        contents = yaml.safe_load(self.default)
    return AttrDict(contents)
"resource": ""
} |
def execute(self):
    """Convert the notebook to a python script and execute it, returning
    the resulting local namespace as a dict."""
    from nbformat import read
    from nbconvert.exporters import export_script
    from cStringIO import StringIO

    notebook = read(StringIO(self.record.unpacked_contents), 4)
    script, resources = export_script(notebook)
    namespace = {}
    # The coding cookie is stripped before compiling -- presumably because
    # compile() rejects it in an already-decoded string; confirm.
    exec (compile(script.replace('# coding: utf-8', ''), 'script', 'exec'), namespace)
    return namespace
"resource": ""
} |
def import_module(self, module_path = 'ambry.build', **kwargs):
    """
    Import the contents of the file into the ambry.build module

    :param module_path: dotted name under which the module is registered.
    :param kwargs: items to add to the module globals
    :return: the (possibly pre-existing) module object.
    """
    from fs.errors import NoSysPathError

    # Reuse an already-registered module so repeated imports share one
    # namespace.
    if module_path in sys.modules:
        module = sys.modules[module_path]
    else:
        module = imp.new_module(module_path)
        sys.modules[module_path] = module

    bf = self.record

    if not bf.contents:
        return module

    module.__dict__.update(**kwargs)

    # Use the real file path for tracebacks when the filesystem has one.
    try:
        abs_path = self._fs.getsyspath(self.file_name)
    except NoSysPathError:
        abs_path = '<string>'

    import re
    if re.search(r'-\*-\s+coding:', bf.contents):
        # Has encoding, so don't decode
        contents = bf.contents
    else:
        contents = bf.unpacked_contents  # Assumes utf-8

    exec(compile(contents, abs_path, 'exec'), module.__dict__)

    return module
"resource": ""
} |
def import_bundle(self):
    """Add the filesystem to the Python sys path with an import hook, then import
    to file as Python, returning the Bundle class defined by the source."""
    from fs.errors import NoSysPathError
    try:
        import ambry.build
        module = sys.modules['ambry.build']
    except ImportError:
        # Module does not exist yet; create and register an empty one.
        module = imp.new_module('ambry.build')
        sys.modules['ambry.build'] = module

    bf = self.record

    if not bf.has_contents:
        # No bundle source: fall back to the base Bundle class.
        from ambry.bundle import Bundle
        return Bundle

    # Use the real file path for tracebacks when the filesystem has one.
    try:
        abs_path = self._fs.getsyspath(self.file_name)
    except NoSysPathError:
        abs_path = '<string>'

    exec(compile(bf.contents, abs_path, 'exec'), module.__dict__)

    return module.Bundle
"resource": ""
} |
def import_lib(self):
    """Import the lib.py file into the bundle module and return that module."""
    try:
        import ambry.build
        module = sys.modules['ambry.build']
    except ImportError:
        # Module does not exist yet; create and register an empty one.
        module = imp.new_module('ambry.build')
        sys.modules['ambry.build'] = module

    bf = self.record

    if not bf.has_contents:
        return

    try:
        exec (compile(bf.contents, self.path, 'exec'), module.__dict__)
    except Exception:
        self._bundle.error("Failed to load code from {}".format(self.path))
        raise

    # print(self.file_const, bundle.__dict__.keys())
    # print(bf.contents)

    return module
"resource": ""
} |
def record_to_objects(self):
    """Write from the stored file data to the source records"""
    from ambry.orm import SourceTable

    bsfile = self.record

    failures = set()

    # Clear out all of the columns from existing tables. We don't clear out the
    # tables, since they may be referenced by sources
    for row in bsfile.dict_row_reader:
        st = self._dataset.source_table(row['table'])
        if st:
            st.columns[:] = []
    self._dataset.commit()

    for row in bsfile.dict_row_reader:
        st = self._dataset.source_table(row['table'])
        if not st:
            st = self._dataset.new_source_table(row['table'])
            # table_number += 1
        if 'datatype' not in row:
            row['datatype'] = 'unknown'
        del row['table']
        st.add_column(**row)  # Create or update

    # NOTE(review): `failures` is never populated above, so this raise is
    # currently unreachable -- confirm whether missing sources should be
    # collected into it.
    if failures:
        raise ConfigurationError('Failed to load source schema, missing sources: {} '.format(failures))

    self._dataset.commit()
"resource": ""
} |
def execute(self):
    """ Executes all sql statements from bundle.sql against the library. """
    from ambry.mprlib import execute_sql
    sql_text = self.record_content
    execute_sql(self._bundle.library, sql_text)
"resource": ""
} |
def list_records(self, file_const=None):
    """Iterate through the file records, optionally filtered by file constant."""
    for record in self._dataset.files:
        if file_const and record.minor_type != file_const:
            continue
        yield self.instance_from_name(record.path)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.