| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q6900
|
_sample
|
train
|
def _sample(probability_vec):
    """Return a random binary list, with the given probabilities."""
    # Wrap in list() so the result supports len() and indexing on Python 3,
    # where map() returns a lazy iterator.
    return list(map(int,
                    numpy.random.random(probability_vec.size) <= probability_vec))
|
python
|
{
"resource": ""
}
|
q6901
|
_adjust_probability_vec_best
|
train
|
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
adjust_rate):
"""Shift probabilities towards the best solution."""
best_solution = max(zip(fitnesses, population))[1]
# Shift probabilities towards best solution
return _adjust(probability_vec, best_solution, adjust_rate)
|
python
|
{
"resource": ""
}
|
q6902
|
_mutate_probability_vec
|
train
|
def _mutate_probability_vec(probability_vec, mutation_chance, mutation_adjust_rate):
"""Randomly adjust probabilities.
WARNING: Modifies probability_vec argument.
"""
bits_to_mutate = numpy.random.random(probability_vec.size) <= mutation_chance
probability_vec[bits_to_mutate] = _adjust(
probability_vec[bits_to_mutate],
numpy.random.random(numpy.sum(bits_to_mutate)), mutation_adjust_rate)
|
python
|
{
"resource": ""
}
|
q6903
|
benchmark_multi
|
train
|
def benchmark_multi(optimizer):
"""Benchmark an optimizer configuration on multiple functions."""
# Get our benchmark stats
all_stats = benchmark.compare(optimizer, PROBLEMS, runs=100)
return benchmark.aggregate(all_stats)
|
python
|
{
"resource": ""
}
|
q6904
|
_sample
|
train
|
def _sample(probabilities, population_size):
"""Return a random population, drawn with regard to a set of probabilities"""
population = []
for _ in range(population_size):
solution = []
for probability in probabilities:
# probability of 1.0: always 1
# probability of 0.0: always 0
if random.uniform(0.0, 1.0) < probability:
solution.append(1)
else:
solution.append(0)
population.append(solution)
return population
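    # Illustrative behaviour (not in the original source): with
    # probabilities = [1.0, 0.0, 0.5], every sampled solution has a 1 in
    # position 0, a 0 in position 1, and a fair coin flip in position 2.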
|
python
|
{
"resource": ""
}
|
q6905
|
_chance
|
train
|
def _chance(solution, pdf):
"""Return the chance of obtaining a solution from a pdf.
    The probability of many independent weighted "coin flips" (one for each bit).
"""
# 1.0 - abs(bit - p) gives probability of bit given p
return _prod([1.0 - abs(bit - p) for bit, p in zip(solution, pdf)])
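    # Illustrative arithmetic (not in the original source): for solution
    # [1, 0] and pdf [0.9, 0.2], the chance is
    # (1.0 - |1 - 0.9|) * (1.0 - |0 - 0.2|) = 0.9 * 0.8 = 0.72.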
|
python
|
{
"resource": ""
}
|
q6906
|
_pdf_value
|
train
|
def _pdf_value(pdf, population, fitnesses, fitness_threshold):
"""Give the value of a pdf.
This represents the likelihood of a pdf generating solutions
that exceed the threshold.
"""
# Add the chance of obtaining a solution from the pdf
# when the fitness for that solution exceeds a threshold
value = 0.0
for solution, fitness in zip(population, fitnesses):
if fitness >= fitness_threshold:
# 1.0 + chance to avoid issues with chance of 0
value += math.log(1.0 + _chance(solution, pdf))
    # The official equation divides value by len(fitnesses) at this point;
    # however, this is unnecessary when we only want the best pdf,
    # because the divisor is the same for every pdf and does not change the ranking
return value
|
python
|
{
"resource": ""
}
|
q6907
|
_update_pdf
|
train
|
def _update_pdf(population, fitnesses, pdfs, quantile):
"""Find a better pdf, based on fitnesses."""
# First we determine a fitness threshold based on a quantile of fitnesses
fitness_threshold = _get_quantile_cutoff(fitnesses, quantile)
# Then check all of our possible pdfs with a stochastic program
return _best_pdf(pdfs, population, fitnesses, fitness_threshold)
|
python
|
{
"resource": ""
}
|
q6908
|
binary_to_float
|
train
|
def binary_to_float(binary_list, lower_bound, upper_bound):
"""Return a floating point number between lower and upper bounds, from binary.
Args:
binary_list: list<int>; List of 0s and 1s.
            The number of bits in this list determines the number of possible
values between lower and upper bound.
Increase the size of binary_list for more precise floating points.
lower_bound: Minimum value for output, inclusive.
A binary list of 0s will have this value.
upper_bound: Maximum value for output, inclusive.
A binary list of 1s will have this value.
Returns:
float; A floating point number.
"""
# Edge case for empty binary_list
if binary_list == []:
# With 0 bits, only one value can be represented,
# and we default to lower_bound
return lower_bound
# A little bit of math gets us a floating point
# number between upper and lower bound
# We look at the relative position of
# the integer corresponding to our binary list
# between the upper and lower bound,
# and offset that by lower bound
return ((
# Range between lower and upper bound
float(upper_bound - lower_bound)
# Divided by the maximum possible integer
/ (2**len(binary_list) - 1)
# Times the integer represented by the given binary
* binary_to_int(binary_list))
# Plus the lower bound
+ lower_bound)
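    # Illustrative arithmetic (not in the original source): with 4 bits,
    # binary_to_float([1, 0, 1, 0], 0.0, 1.0)
    # == (1.0 - 0.0) / (2**4 - 1) * 10 + 0.0 == 10.0 / 15 ~= 0.667.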
|
python
|
{
"resource": ""
}
|
q6909
|
binary_to_int
|
train
|
def binary_to_int(binary_list, lower_bound=0, upper_bound=None):
"""Return the base 10 integer corresponding to a binary list.
    The maximum value is determined by the number of bits in binary_list
    and by upper_bound, whichever of the two is more restrictive.
Args:
binary_list: list<int>; List of 0s and 1s.
lower_bound: Minimum value for output, inclusive.
A binary list of 0s will have this value.
upper_bound: Maximum value for output, inclusive.
            If the encoded integer is greater than this bound, we "bounce back".
Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
Ex.
raw_integer = 11, upper_bound = 10, return = 10
raw_integer = 12, upper_bound = 10, return = 9
Returns:
int; Integer value of the binary input.
"""
# Edge case for empty binary_list
if binary_list == []:
# With 0 bits, only one value can be represented,
# and we default to lower_bound
return lower_bound
else:
# The builtin int construction can take a base argument,
# but it requires a string,
# so we convert our binary list to a string
integer = int(''.join([str(bit) for bit in binary_list]), 2)
# Trim if over upper_bound
if (upper_bound is not None) and integer + lower_bound > upper_bound:
# Bounce back. Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
return upper_bound - (integer % (upper_bound - lower_bound + 1))
else:
# Not over upper_bound
return integer + lower_bound
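    # Illustrative arithmetic (not in the original source):
    # binary_to_int([1, 0, 1, 1], upper_bound=10) encodes 11, which bounces
    # back to 10 - (11 % 11) == 10; [1, 1, 0, 0] encodes 12 and returns 9.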
|
python
|
{
"resource": ""
}
|
q6910
|
_int_to_binary
|
train
|
def _int_to_binary(integer, size=None):
    """Return bit list representation of integer.
    If size is given, the bit list is padded with 0s, or clipped to the size.
    """
    # Use a concrete list (rather than a lazy map) so len() and slicing below
    # work on Python 3.
    binary_list = [int(bit) for bit in format(integer, 'b')]
if size is None:
return binary_list
else:
if len(binary_list) > size:
# Too long, take only last n
return binary_list[len(binary_list)-size:]
elif size > len(binary_list):
# Too short, pad
return [0]*(size-len(binary_list)) + binary_list
else:
# Just right
return binary_list
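    # Illustrative behaviour (not in the original source):
    # _int_to_binary(5, size=4) -> [0, 1, 0, 1] (padded);
    # _int_to_binary(9, size=3) -> [0, 0, 1] (clipped to the last 3 bits).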
|
python
|
{
"resource": ""
}
|
q6911
|
RandomReal._generate_solution
|
train
|
def _generate_solution(self):
"""Return a single random solution."""
return common.random_real_solution(
self._solution_size, self._lower_bounds, self._upper_bounds)
|
python
|
{
"resource": ""
}
|
q6912
|
BaseTree._init_sub_groups
|
train
|
def _init_sub_groups(self, parent):
"""
Initialise sub-groups, and create any that do not already exist.
"""
if self._sub_groups:
for sub_group in self._sub_groups:
for component in split_path_components(sub_group):
fp = os.path.join(parent.full_path, component)
if os.path.exists(fp):
node = Node(name=component, parent=parent)
parent.children.append(node)
else:
node = parent.create_cgroup(component)
parent = node
self._init_children(node)
else:
self._init_children(parent)
|
python
|
{
"resource": ""
}
|
q6913
|
BaseTree._init_children
|
train
|
def _init_children(self, parent):
"""
Initialise each node's children - essentially build the tree.
"""
for dir_name in self.get_children_paths(parent.full_path):
child = Node(name=dir_name, parent=parent)
parent.children.append(child)
self._init_children(child)
|
python
|
{
"resource": ""
}
|
q6914
|
Node.full_path
|
train
|
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
|
python
|
{
"resource": ""
}
|
q6915
|
Node.path
|
train
|
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
|
python
|
{
"resource": ""
}
|
q6916
|
Node._get_node_type
|
train
|
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
|
python
|
{
"resource": ""
}
|
q6917
|
Node._get_controller_type
|
train
|
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
|
python
|
{
"resource": ""
}
|
q6918
|
Node.create_cgroup
|
train
|
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
|
python
|
{
"resource": ""
}
|
q6919
|
Node.delete_cgroup
|
train
|
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
|
python
|
{
"resource": ""
}
|
q6920
|
Node.delete_empty_children
|
train
|
def delete_empty_children(self):
    """
    Walk through the children of this node and delete any that are empty.
    """
    # Iterate over a copy, since children may be removed during the walk.
    for child in list(self.children):
        child.delete_empty_children()
        try:
            if os.path.exists(child.full_path):
                os.rmdir(child.full_path)
        except OSError:
            pass
        else:
            self.children.remove(child)
|
python
|
{
"resource": ""
}
|
q6921
|
NodeControlGroup.add_node
|
train
|
def add_node(self, node):
"""
    Add a Node object to the group. Only one node per controller type is supported.
"""
if self.controllers.get(node.controller_type, None):
raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
node,
node.controller_type
))
self.nodes.append(node)
if node.controller:
self.controllers[node.controller_type] = node.controller
setattr(self, node.controller_type, node.controller)
|
python
|
{
"resource": ""
}
|
q6922
|
NodeControlGroup.group_tasks
|
train
|
def group_tasks(self):
"""All tasks in the hierarchy, affected by this group."""
tasks = set()
for node in walk_tree(self):
for ctrl in node.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
python
|
{
"resource": ""
}
|
q6923
|
NodeControlGroup.tasks
|
train
|
def tasks(self):
"""Tasks in this exact group"""
tasks = set()
for ctrl in self.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
python
|
{
"resource": ""
}
|
q6924
|
Controller.filepath
|
train
|
def filepath(self, filename):
"""The full path to a file"""
return os.path.join(self.node.full_path, filename)
|
python
|
{
"resource": ""
}
|
q6925
|
Controller.get_property
|
train
|
def get_property(self, filename):
"""Opens the file and reads the value"""
with open(self.filepath(filename)) as f:
return f.read().strip()
|
python
|
{
"resource": ""
}
|
q6926
|
Controller.set_property
|
train
|
def set_property(self, filename, value):
"""Opens the file and writes the value"""
with open(self.filepath(filename), "w") as f:
return f.write(str(value))
|
python
|
{
"resource": ""
}
|
q6927
|
walk_tree
|
train
|
def walk_tree(root):
"""Pre-order depth-first"""
yield root
for child in root.children:
for el in walk_tree(child):
yield el
|
python
|
{
"resource": ""
}
|
q6928
|
walk_up_tree
|
train
|
def walk_up_tree(root):
"""Post-order depth-first"""
for child in root.children:
for el in walk_up_tree(child):
yield el
yield root
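    # Illustrative ordering (not in the original source): for a root A with
    # children B and C, walk_tree yields A, B, C (parents first), while
    # walk_up_tree yields B, C, A (children first), the safe order for teardown.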
|
python
|
{
"resource": ""
}
|
q6929
|
SchemaAware.validate
|
train
|
def validate(self):
"""
Validate that this instance matches its schema.
"""
schema = Schema(self.__class__.SCHEMA)
resolver = RefResolver.from_schema(
schema,
store=REGISTRY,
)
validate(self, schema, resolver=resolver)
|
python
|
{
"resource": ""
}
|
q6930
|
SchemaAware.dumps
|
train
|
def dumps(self):
"""
Dump this instance as YAML.
"""
with closing(StringIO()) as fileobj:
self.dump(fileobj)
return fileobj.getvalue()
|
python
|
{
"resource": ""
}
|
q6931
|
SchemaAware.loads
|
train
|
def loads(cls, s):
"""
Load an instance of this class from YAML.
"""
with closing(StringIO(s)) as fileobj:
return cls.load(fileobj)
|
python
|
{
"resource": ""
}
|
q6932
|
SchemaAwareDict.property_schema
|
train
|
def property_schema(self, key):
"""
Lookup the schema for a specific property.
"""
schema = self.__class__.SCHEMA
# first try plain properties
plain_schema = schema.get("properties", {}).get(key)
if plain_schema is not None:
return plain_schema
# then try pattern properties
pattern_properties = schema.get("patternProperties", {})
for pattern, pattern_schema in pattern_properties.items():
if match(pattern, key):
return pattern_schema
# finally try additional properties (defaults to true per JSON Schema)
return schema.get("additionalProperties", True)
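    # Illustrative behaviour with a hypothetical schema (not in the original
    # source): given SCHEMA = {
    #     "properties": {"name": {"type": "string"}},
    #     "patternProperties": {"^x-": {"type": "integer"}},
    # }
    # property_schema("name") -> {"type": "string"}
    # property_schema("x-count") -> {"type": "integer"}
    # property_schema("other") -> True  (the additionalProperties default)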
|
python
|
{
"resource": ""
}
|
q6933
|
make
|
train
|
def make(class_name, base, schema):
"""
Create a new schema aware type.
"""
return type(class_name, (base,), dict(SCHEMA=schema))
|
python
|
{
"resource": ""
}
|
q6934
|
make_definition
|
train
|
def make_definition(name, base, schema):
"""
Create a new definition.
"""
class_name = make_class_name(name)
cls = register(make(class_name, base, schema))
globals()[class_name] = cls
|
python
|
{
"resource": ""
}
|
q6935
|
register
|
train
|
def register(cls):
"""
Register a class.
"""
definition_name = make_definition_name(cls.__name__)
REGISTRY[definition_name] = cls
return cls
|
python
|
{
"resource": ""
}
|
q6936
|
lookup
|
train
|
def lookup(schema):
"""
Lookup a class by property schema.
"""
if not isinstance(schema, dict) or "$ref" not in schema:
return None
ref = schema["$ref"]
return REGISTRY.get(ref)
|
python
|
{
"resource": ""
}
|
q6937
|
LutronConnection.connect
|
train
|
def connect(self):
"""Connects to the lutron controller."""
if self._connected or self.is_alive():
raise ConnectionExistsError("Already connected")
# After starting the thread we wait for it to post us
# an event signifying that connection is established. This
# ensures that the caller only resumes when we are fully connected.
self.start()
with self._lock:
self._connect_cond.wait_for(lambda: self._connected)
|
python
|
{
"resource": ""
}
|
q6938
|
LutronConnection._send_locked
|
train
|
def _send_locked(self, cmd):
"""Sends the specified command to the lutron controller.
Assumes self._lock is held.
"""
_LOGGER.debug("Sending: %s" % cmd)
try:
self._telnet.write(cmd.encode('ascii') + b'\r\n')
except BrokenPipeError:
self._disconnect_locked()
|
python
|
{
"resource": ""
}
|
q6939
|
LutronConnection._disconnect_locked
|
train
|
def _disconnect_locked(self):
"""Closes the current connection. Assume self._lock is held."""
self._connected = False
self._connect_cond.notify_all()
self._telnet = None
_LOGGER.warning("Disconnected")
|
python
|
{
"resource": ""
}
|
q6940
|
LutronConnection._maybe_reconnect
|
train
|
def _maybe_reconnect(self):
"""Reconnects to the controller if we have been previously disconnected."""
with self._lock:
if not self._connected:
_LOGGER.info("Connecting")
self._do_login_locked()
self._connected = True
self._connect_cond.notify_all()
_LOGGER.info("Connected")
|
python
|
{
"resource": ""
}
|
q6941
|
LutronConnection.run
|
train
|
def run(self):
"""Main thread function to maintain connection and receive remote status."""
_LOGGER.info("Started")
while True:
self._maybe_reconnect()
line = ''
try:
# If someone is sending a command, we can lose our connection so grab a
# copy beforehand. We don't need the lock because if the connection is
# open, we are the only ones that will read from telnet (the reconnect
# code runs synchronously in this loop).
t = self._telnet
if t is not None:
line = t.read_until(b"\n")
except EOFError:
try:
self._lock.acquire()
self._disconnect_locked()
continue
finally:
self._lock.release()
self._recv_cb(line.decode('ascii').rstrip())
|
python
|
{
"resource": ""
}
|
q6942
|
LutronXmlDbParser.parse
|
train
|
def parse(self):
"""Main entrypoint into the parser. It interprets and creates all the
relevant Lutron objects and stuffs them into the appropriate hierarchy."""
import xml.etree.ElementTree as ET
root = ET.fromstring(self._xml_db_str)
# The structure is something like this:
# <Areas>
# <Area ...>
# <DeviceGroups ...>
# <Scenes ...>
# <ShadeGroups ...>
# <Outputs ...>
# <Areas ...>
# <Area ...>
# First area is useless, it's the top-level project area that defines the
# "house". It contains the real nested Areas tree, which is the one we want.
top_area = root.find('Areas').find('Area')
self.project_name = top_area.get('Name')
areas = top_area.find('Areas')
    # Element.iter replaces getiterator, which was removed in Python 3.9.
    for area_xml in areas.iter('Area'):
area = self._parse_area(area_xml)
self.areas.append(area)
return True
|
python
|
{
"resource": ""
}
|
q6943
|
LutronXmlDbParser._parse_area
|
train
|
def _parse_area(self, area_xml):
"""Parses an Area tag, which is effectively a room, depending on how the
Lutron controller programming was done."""
area = Area(self._lutron,
name=area_xml.get('Name'),
integration_id=int(area_xml.get('IntegrationID')),
occupancy_group_id=area_xml.get('OccupancyGroupAssignedToID'))
for output_xml in area_xml.find('Outputs'):
output = self._parse_output(output_xml)
area.add_output(output)
# device group in our case means keypad
# device_group.get('Name') is the location of the keypad
for device_group in area_xml.find('DeviceGroups'):
if device_group.tag == 'DeviceGroup':
devs = device_group.find('Devices')
elif device_group.tag == 'Device':
devs = [device_group]
        else:
            # Log the unknown tag itself; `devs` is not yet defined on this branch.
            _LOGGER.info("Unknown tag in DeviceGroups child %s" % device_group.tag)
            devs = []
for device_xml in devs:
if device_xml.tag != 'Device':
continue
if device_xml.get('DeviceType') in (
'SEETOUCH_KEYPAD',
'SEETOUCH_TABLETOP_KEYPAD',
'PICO_KEYPAD',
'HYBRID_SEETOUCH_KEYPAD',
'MAIN_REPEATER'):
keypad = self._parse_keypad(device_xml)
area.add_keypad(keypad)
elif device_xml.get('DeviceType') == 'MOTION_SENSOR':
motion_sensor = self._parse_motion_sensor(device_xml)
area.add_sensor(motion_sensor)
#elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER':
return area
|
python
|
{
"resource": ""
}
|
q6944
|
LutronXmlDbParser._parse_button
|
train
|
def _parse_button(self, keypad, component_xml):
"""Parses a button device that part of a keypad."""
button_xml = component_xml.find('Button')
name = button_xml.get('Engraving')
button_type = button_xml.get('ButtonType')
direction = button_xml.get('Direction')
# Hybrid keypads have dimmer buttons which have no engravings.
if button_type == 'SingleSceneRaiseLower':
name = 'Dimmer ' + direction
if not name:
name = "Unknown Button"
button = Button(self._lutron, keypad,
name=name,
num=int(component_xml.get('ComponentNumber')),
button_type=button_type,
direction=direction)
return button
|
python
|
{
"resource": ""
}
|
q6945
|
LutronXmlDbParser._parse_led
|
train
|
def _parse_led(self, keypad, component_xml):
"""Parses an LED device that part of a keypad."""
component_num = int(component_xml.get('ComponentNumber'))
led_num = component_num - 80
led = Led(self._lutron, keypad,
name=('LED %d' % led_num),
led_num=led_num,
component_num=component_num)
return led
|
python
|
{
"resource": ""
}
|
q6946
|
LutronXmlDbParser._parse_motion_sensor
|
train
|
def _parse_motion_sensor(self, sensor_xml):
"""Parses a motion sensor object.
TODO: We don't actually do anything with these yet. There's a lot of info
that needs to be managed to do this right. We'd have to manage the occupancy
groups, what's assigned to them, and when they go (un)occupied. We'll handle
this later.
"""
return MotionSensor(self._lutron,
name=sensor_xml.get('Name'),
integration_id=int(sensor_xml.get('IntegrationID')))
|
python
|
{
"resource": ""
}
|
q6947
|
Lutron.subscribe
|
train
|
def subscribe(self, obj, handler):
"""Subscribes to status updates of the requested object.
DEPRECATED
The handler will be invoked when the controller sends a notification
regarding changed state. The user can then further query the object for the
state itself."""
if not isinstance(obj, LutronEntity):
raise InvalidSubscription("Subscription target not a LutronEntity")
_LOGGER.warning("DEPRECATED: Subscribing via Lutron.subscribe is obsolete. "
"Please use LutronEntity.subscribe")
if obj not in self._legacy_subscribers:
self._legacy_subscribers[obj] = handler
obj.subscribe(self._dispatch_legacy_subscriber, None)
|
python
|
{
"resource": ""
}
|
q6948
|
Lutron._dispatch_legacy_subscriber
|
train
|
def _dispatch_legacy_subscriber(self, obj, *args, **kwargs):
"""This dispatches the registered callback for 'obj'. This is only used
for legacy subscribers since new users should register with the target
object directly."""
if obj in self._legacy_subscribers:
self._legacy_subscribers[obj](obj)
|
python
|
{
"resource": ""
}
|
q6949
|
Lutron._recv
|
train
|
def _recv(self, line):
"""Invoked by the connection manager to process incoming data."""
if line == '':
return
# Only handle query response messages, which are also sent on remote status
# updates (e.g. user manually pressed a keypad button)
if line[0] != Lutron.OP_RESPONSE:
_LOGGER.debug("ignoring %s" % line)
return
parts = line[1:].split(',')
cmd_type = parts[0]
integration_id = int(parts[1])
args = parts[2:]
if cmd_type not in self._ids:
_LOGGER.info("Unknown cmd %s (%s)" % (cmd_type, line))
return
ids = self._ids[cmd_type]
if integration_id not in ids:
_LOGGER.warning("Unknown id %d (%s)" % (integration_id, line))
return
obj = ids[integration_id]
handled = obj.handle_update(args)
|
python
|
{
"resource": ""
}
|
q6950
|
Lutron.send
|
train
|
def send(self, op, cmd, integration_id, *args):
"""Formats and sends the requested command to the Lutron controller."""
out_cmd = ",".join(
(cmd, str(integration_id)) + tuple((str(x) for x in args)))
self._conn.send(op + out_cmd)
|
python
|
{
"resource": ""
}
|
q6951
|
Lutron.load_xml_db
|
train
|
def load_xml_db(self):
"""Load the Lutron database from the server."""
import urllib.request
xmlfile = urllib.request.urlopen('http://' + self._host + '/DbXmlInfo.xml')
xml_db = xmlfile.read()
xmlfile.close()
_LOGGER.info("Loaded xml db")
parser = LutronXmlDbParser(lutron=self, xml_db_str=xml_db)
assert(parser.parse()) # throw our own exception
self._areas = parser.areas
self._name = parser.project_name
_LOGGER.info('Found Lutron project: %s, %d areas' % (
self._name, len(self.areas)))
return True
|
python
|
{
"resource": ""
}
|
q6952
|
_RequestHelper.request
|
train
|
def request(self, action):
"""Request an action to be performed, in case one."""
ev = threading.Event()
first = False
with self.__lock:
if len(self.__events) == 0:
first = True
self.__events.append(ev)
if first:
action()
return ev
|
python
|
{
"resource": ""
}
|
q6953
|
LutronEntity._dispatch_event
|
train
|
def _dispatch_event(self, event: LutronEvent, params: Dict):
"""Dispatches the specified event to all the subscribers."""
for handler, context in self._subscribers:
handler(self, context, event, params)
|
python
|
{
"resource": ""
}
|
q6954
|
LutronEntity.subscribe
|
train
|
def subscribe(self, handler: LutronEventHandler, context):
"""Subscribes to events from this entity.
handler: A callable object that takes the following arguments (in order)
        obj: the LutronEntity object that generated the event
context: user-supplied (to subscribe()) context object
event: the LutronEvent that was generated.
params: a dict of event-specific parameters
context: User-supplied, opaque object that will be passed to handler.
"""
self._subscribers.append((handler, context))
|
python
|
{
"resource": ""
}
|
q6955
|
Output.level
|
train
|
def level(self):
"""Returns the current output level by querying the remote controller."""
ev = self._query_waiters.request(self.__do_query_level)
ev.wait(1.0)
return self._level
|
python
|
{
"resource": ""
}
|
q6956
|
Output.level
|
train
|
def level(self, new_level):
"""Sets the new output level."""
if self._level == new_level:
return
self._lutron.send(Lutron.OP_EXECUTE, Output._CMD_TYPE, self._integration_id,
Output._ACTION_ZONE_LEVEL, "%.2f" % new_level)
self._level = new_level
|
python
|
{
"resource": ""
}
|
q6957
|
Button.press
|
train
|
def press(self):
"""Triggers a simulated button press to the Keypad."""
self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
self.component_number, Button._ACTION_PRESS)
|
python
|
{
"resource": ""
}
|
q6958
|
Led.__do_query_state
|
train
|
def __do_query_state(self):
"""Helper to perform the actual query for the current LED state."""
self._lutron.send(Lutron.OP_QUERY, Keypad._CMD_TYPE, self._keypad.id,
self.component_number, Led._ACTION_LED_STATE)
|
python
|
{
"resource": ""
}
|
q6959
|
Led.state
|
train
|
def state(self):
"""Returns the current LED state by querying the remote controller."""
ev = self._query_waiters.request(self.__do_query_state)
ev.wait(1.0)
return self._state
|
python
|
{
"resource": ""
}
|
q6960
|
Led.state
|
train
|
def state(self, new_state: bool):
"""Sets the new led state.
new_state: bool
"""
self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
self.component_number, Led._ACTION_LED_STATE,
int(new_state))
self._state = new_state
|
python
|
{
"resource": ""
}
|
q6961
|
Keypad.add_button
|
train
|
def add_button(self, button):
"""Adds a button that's part of this keypad. We'll use this to
dispatch button events."""
self._buttons.append(button)
self._components[button.component_number] = button
|
python
|
{
"resource": ""
}
|
q6962
|
Keypad.add_led
|
train
|
def add_led(self, led):
"""Add an LED that's part of this keypad."""
self._leds.append(led)
self._components[led.component_number] = led
|
python
|
{
"resource": ""
}
|
q6963
|
Keypad.handle_update
|
train
|
def handle_update(self, args):
"""The callback invoked by the main event loop if there's an event from this keypad."""
component = int(args[0])
action = int(args[1])
params = [int(x) for x in args[2:]]
_LOGGER.debug("Updating %d(%s): c=%d a=%d params=%s" % (
self._integration_id, self._name, component, action, params))
if component in self._components:
return self._components[component].handle_update(action, params)
return False
|
python
|
{
"resource": ""
}
|
q6964
|
run_validator
|
train
|
def run_validator(pattern):
"""
    Validates a pattern against the STIX Pattern grammar. Error messages are
    returned in a list. The test passes if the returned list is empty.
"""
start = ''
if isinstance(pattern, six.string_types):
start = pattern[:2]
pattern = InputStream(pattern)
if not start:
start = pattern.readline()[:2]
pattern.seek(0)
parseErrListener = STIXPatternErrorListener()
lexer = STIXPatternLexer(pattern)
# it always adds a console listener by default... remove it.
lexer.removeErrorListeners()
stream = CommonTokenStream(lexer)
parser = STIXPatternParser(stream)
parser.buildParseTrees = False
# it always adds a console listener by default... remove it.
parser.removeErrorListeners()
parser.addErrorListener(parseErrListener)
# To improve error messages, replace "<INVALID>" in the literal
# names with symbolic names. This is a hack, but seemed like
# the simplest workaround.
for i, lit_name in enumerate(parser.literalNames):
if lit_name == u"<INVALID>":
parser.literalNames[i] = parser.symbolicNames[i]
parser.pattern()
# replace with easier-to-understand error message
if not (start[0] == '[' or start == '(['):
parseErrListener.err_strings[0] = "FAIL: Error found at line 1:0. " \
"input is missing square brackets"
return parseErrListener.err_strings
|
python
|
{
"resource": ""
}
|
q6965
|
validate
|
train
|
def validate(user_input, ret_errs=False, print_errs=False):
"""
Wrapper for run_validator function that returns True if the user_input
contains a valid STIX pattern or False otherwise. The error messages may
also be returned or printed based upon the ret_errs and print_errs arg
values.
"""
errs = run_validator(user_input)
passed = len(errs) == 0
if print_errs:
for err in errs:
print(err)
if ret_errs:
return passed, errs
return passed
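    # Illustrative usage (not in the original source):
    # validate("[ipv4-addr:value = '203.0.113.1']") -> True
    # validate("ipv4-addr:value = '203.0.113.1'")   -> False (missing brackets)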
|
python
|
{
"resource": ""
}
|
q6966
|
main
|
train
|
def main():
"""
Continues to validate patterns until it encounters EOF within a pattern
file or Ctrl-C is pressed by the user.
"""
parser = argparse.ArgumentParser(description='Validate STIX Patterns.')
parser.add_argument('-f', '--file',
help="Specify this arg to read patterns from a file.",
type=argparse.FileType("r"))
args = parser.parse_args()
pass_count = fail_count = 0
# I tried using a generator (where each iteration would run raw_input()),
# but raw_input()'s behavior seems to change when called from within a
# generator: I only get one line, then the generator completes! I don't
# know why behavior changes...
import functools
if args.file:
nextpattern = args.file.readline
else:
nextpattern = functools.partial(six.moves.input, "Enter a pattern to validate: ")
try:
while True:
pattern = nextpattern()
if not pattern:
break
tests_passed, err_strings = validate(pattern, True)
if tests_passed:
print("\nPASS: %s" % pattern)
pass_count += 1
else:
for err in err_strings:
print(err, '\n')
fail_count += 1
except (EOFError, KeyboardInterrupt):
pass
finally:
if args.file:
args.file.close()
print("\nPASSED:", pass_count, " patterns")
print("FAILED:", fail_count, " patterns")
|
python
|
{
"resource": ""
}
|
q6967
|
PDFView.get
|
train
|
def get(self, request, *args, **kwargs):
"""
    Return an HttpResponse containing either a PDF file or HTML.
:rtype: HttpResponse
"""
if 'html' in request.GET:
# Output HTML
content = self.render_html(*args, **kwargs)
return HttpResponse(content)
else:
# Output PDF
content = self.render_pdf(*args, **kwargs)
response = HttpResponse(content, content_type='application/pdf')
if (not self.inline or 'download' in request.GET) and 'inline' not in request.GET:
response['Content-Disposition'] = 'attachment; filename=%s' % self.get_filename()
response['Content-Length'] = len(content)
return response
|
python
|
{
"resource": ""
}
|
q6968
|
PDFView.render_pdf
|
train
|
def render_pdf(self, *args, **kwargs):
"""
Render the PDF and returns as bytes.
:rtype: bytes
"""
html = self.render_html(*args, **kwargs)
options = self.get_pdfkit_options()
if 'debug' in self.request.GET and settings.DEBUG:
options['debug-javascript'] = 1
kwargs = {}
wkhtmltopdf_bin = os.environ.get('WKHTMLTOPDF_BIN')
if wkhtmltopdf_bin:
kwargs['configuration'] = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_bin)
pdf = pdfkit.from_string(html, False, options, **kwargs)
return pdf
|
python
|
{
"resource": ""
}
|
q6969
|
PDFView.get_filename
|
train
|
def get_filename(self):
"""
    Return ``self.filename`` if set; otherwise return the template basename with a ``.pdf`` extension.
:rtype: str
"""
if self.filename is None:
name = splitext(basename(self.template_name))[0]
return '{}.pdf'.format(name)
return self.filename
|
python
|
{
"resource": ""
}
|
q6970
|
PDFView.render_html
|
train
|
def render_html(self, *args, **kwargs):
"""
Renders the template.
:rtype: str
"""
static_url = '%s://%s%s' % (self.request.scheme, self.request.get_host(), settings.STATIC_URL)
media_url = '%s://%s%s' % (self.request.scheme, self.request.get_host(), settings.MEDIA_URL)
with override_settings(STATIC_URL=static_url, MEDIA_URL=media_url):
template = loader.get_template(self.template_name)
context = self.get_context_data(*args, **kwargs)
html = template.render(context)
return html
|
python
|
{
"resource": ""
}
|
q6971
|
Pattern.inspect
|
train
|
def inspect(self):
"""
Inspect a pattern. This gives information regarding the sorts of
operations, content, etc in use in the pattern.
:return: Pattern information
"""
inspector = stix2patterns.inspector.InspectionListener()
self.walk(inspector)
return inspector.pattern_data()
|
python
|
{
"resource": ""
}
|
q6972
|
Pattern.__do_parse
|
train
|
def __do_parse(self, pattern_str):
"""
Parses the given pattern and returns the antlr parse tree.
:param pattern_str: The STIX pattern
:return: The parse tree
:raises ParseException: If there is a parse error
"""
in_ = antlr4.InputStream(pattern_str)
lexer = STIXPatternLexer(in_)
lexer.removeErrorListeners() # remove the default "console" listener
token_stream = antlr4.CommonTokenStream(lexer)
parser = STIXPatternParser(token_stream)
parser.removeErrorListeners() # remove the default "console" listener
error_listener = ParserErrorListener()
parser.addErrorListener(error_listener)
# I found no public API for this...
# The default error handler tries to keep parsing, and I don't
# think that's appropriate here. (These error handlers are only for
# handling the built-in RecognitionException errors.)
parser._errHandler = antlr4.BailErrorStrategy()
# To improve error messages, replace "<INVALID>" in the literal
# names with symbolic names. This is a hack, but seemed like
# the simplest workaround.
for i, lit_name in enumerate(parser.literalNames):
if lit_name == u"<INVALID>":
parser.literalNames[i] = parser.symbolicNames[i]
# parser.setTrace(True)
try:
tree = parser.pattern()
# print(tree.toStringTree(recog=parser))
return tree
except antlr4.error.Errors.ParseCancellationException as e:
# The cancellation exception wraps the real RecognitionException
# which caused the parser to bail.
real_exc = e.args[0]
# I want to bail when the first error is hit. But I also want
# a decent error message. When an error is encountered in
# Parser.match(), the BailErrorStrategy produces the
# ParseCancellationException. It is not a subclass of
# RecognitionException, so none of the 'except' clauses which would
# normally report an error are invoked.
#
# Error message creation is buried in the ErrorStrategy, and I can
# (ab)use the API to get a message: register an error listener with
# the parser, force an error report, then get the message out of the
# listener. Error listener registration is above; now we force its
# invocation. Wish this could be cleaner...
parser._errHandler.reportError(parser, real_exc)
# should probably chain exceptions if we can...
# Should I report the cancellation or recognition exception as the
# cause...?
six.raise_from(ParseException(error_listener.error_message),
real_exc)
|
python
|
{
"resource": ""
}
|
q6973
|
Response.exists
|
train
|
def exists(self):
"""
    Returns True if the job is still running, or if zero-os still knows about this job ID.
    After a job finishes, it remains on zero-os for a maximum of 5 minutes, during which
    you can still read the job result; once those 5 minutes have passed, the result is
    no longer fetchable.
:return: bool
"""
r = self._client._redis
flag = '{}:flag'.format(self._queue)
return bool(r.exists(flag))
|
python
|
{
"resource": ""
}
|
q6974
|
Response.stream
|
train
|
def stream(self, callback=None):
"""
    Runtime copy of job messages. This requires the `stream` flag to be set to True, otherwise
    no output can be copied, although the call will still block until the process exits.
:note: This function will block until it reaches end of stream or the process is no longer running.
:param callback: callback method that will get called for each received message
callback accepts 3 arguments
- level int: the log message levels, refer to the docs for available levels
and their meanings
- message str: the actual output message
- flags int: flags associated with this message
- 0x2 means EOF with success exit status
- 0x4 means EOF with error
          for example (eof = flags & 0x6): eof will be true for the last message you will
          ever receive on this callback.
    Note: if callback is None, a default callback will be used that prints output on
          stdout/stderr based on level.
based on level.
:return: None
"""
if callback is None:
callback = Response.__default
if not callable(callback):
raise Exception('callback must be callable')
queue = 'stream:%s' % self.id
r = self._client._redis
# we can terminate quickly by checking if the process is not running and it has no queued output.
# if not self.running and r.llen(queue) == 0:
# return
while True:
data = r.blpop(queue, 10)
if data is None:
if not self.running:
break
continue
_, body = data
payload = json.loads(body.decode())
message = payload['message']
line = message['message']
meta = message['meta']
callback(meta >> 16, line, meta & 0xff)
if meta & 0x6 != 0:
break
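    # Illustrative callback sketch (not in the original source; the
    # level-to-stream mapping below is an assumption):
    # def my_callback(level, message, flags):
    #     stream_name = 'stdout' if level == 1 else 'stderr'
    #     eof = flags & 0x6  # 0x2 = EOF/success, 0x4 = EOF/error
    #     print('[%s] %s%s' % (stream_name, message, ' <EOF>' if eof else ''))
    # response.stream(my_callback)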
|
python
|
{
"resource": ""
}
|
q6975
|
JSONResponse.get
|
train
|
def get(self, timeout=None):
"""
Get response as json, will fail if the job doesn't return a valid json response
:param timeout: client side timeout in seconds
    :return: the decoded JSON object
"""
result = super().get(timeout)
if result.state != 'SUCCESS':
raise ResultError(result.data, result.code)
if result.level != 20:
raise ResultError('not a json response: %d' % result.level, 406)
return json.loads(result.data)
|
python
|
{
"resource": ""
}
|
q6976
|
JobManager.list
|
train
|
def list(self, id=None):
"""
List all running jobs
:param id: optional ID for the job to list
"""
args = {'id': id}
self._job_chk.check(args)
return self._client.json('job.list', args)
|
python
|
{
"resource": ""
}
|
q6977
|
JobManager.kill
|
train
|
def kill(self, id, signal=signal.SIGTERM):
"""
Kill a job with given id
    :WARNING: beware of what you kill; if you kill redis, for example, core0 or coreX won't be reachable
:param id: job id to kill
"""
args = {
'id': id,
'signal': int(signal),
}
self._kill_chk.check(args)
return self._client.json('job.kill', args)
|
python
|
{
"resource": ""
}
|
q6978
|
ProcessManager.list
|
train
|
def list(self, id=None):
"""
List all running processes
:param id: optional PID for the process to list
"""
args = {'pid': id}
self._process_chk.check(args)
return self._client.json('process.list', args)
|
python
|
{
"resource": ""
}
|
q6979
|
FilesystemManager.open
|
train
|
def open(self, file, mode='r', perm=0o0644):
"""
Opens a file on the node
:param file: file path to open
:param mode: open mode
:param perm: file permission in octet form
mode:
'r' read only
'w' write only (truncate)
'+' read/write
'x' create if not exist
'a' append
:return: a file descriptor
"""
args = {
'file': file,
'mode': mode,
'perm': perm,
}
return self._client.json('filesystem.open', args)
|
python
|
{
"resource": ""
}
|
q6980
|
FilesystemManager.move
|
train
|
def move(self, path, destination):
"""
Move a path to destination
:param path: source
:param destination: destination
:return:
"""
args = {
'path': path,
'destination': destination,
}
return self._client.json('filesystem.move', args)
|
python
|
{
"resource": ""
}
|
q6981
|
FilesystemManager.read
|
train
|
def read(self, fd):
"""
Read a block from the given file descriptor
:param fd: file descriptor
:return: bytes
"""
args = {
'fd': fd,
}
data = self._client.json('filesystem.read', args)
return base64.decodebytes(data.encode())
|
python
|
{
"resource": ""
}
|
q6982
|
FilesystemManager.write
|
train
|
def write(self, fd, bytes):
"""
    Write a block of bytes to an open file descriptor (one that is open with one of the writing modes).
:param fd: file descriptor
:param bytes: bytes block to write
:return:
    :note: don't overload the node with large byte chunks; for large file uploads, check the upload method.
"""
args = {
'fd': fd,
'block': base64.encodebytes(bytes).decode(),
}
return self._client.json('filesystem.write', args)
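    # Illustrative usage sketch (not in the original source; `client` and the
    # path are assumptions, and a matching filesystem.close is assumed to exist):
    # fd = client.filesystem.open('/tmp/demo.txt', 'wx')
    # client.filesystem.write(fd, b'hello world')
    # client.filesystem.close(fd)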
|
python
|
{
"resource": ""
}
|
q6983
|
BaseClient.bash
|
train
|
def bash(self, script, stdin='', queue=None, max_time=None, stream=False, tags=None, id=None):
"""
Execute a bash script, or run a process inside a bash shell.
:param script: Script to execute (can be multiline script)
:param stdin: Stdin data to feed to the script
:param id: job id. Auto generated if not defined.
:return:
"""
args = {
'script': script,
'stdin': stdin,
}
self._bash_chk.check(args)
response = self.raw(command='bash', arguments=args,
queue=queue, max_time=max_time, stream=stream, tags=tags, id=id)
return response
|
python
|
{
"resource": ""
}
|
q6984
|
ContainerManager.terminate
|
train
|
def terminate(self, container):
"""
    Terminate a container given its id
:param container: container id
:return:
"""
self._client_chk.check(container)
args = {
'container': int(container),
}
response = self._client.raw('corex.terminate', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to terminate container: %s' % result.data)
|
python
|
{
"resource": ""
}
|
q6985
|
ContainerManager.nic_add
|
train
|
def nic_add(self, container, nic):
"""
Hot plug a nic into a container
:param container: container ID
:param nic: {
'type': nic_type # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
'id': id # depends on the type
bridge: bridge name,
zerotier: network id,
macvlan: the parent link name,
passthrough: the link name,
vlan: the vlan tag,
vxlan: the vxlan id
'name': name of the nic inside the container (ignored in zerotier type)
'hwaddr': Mac address of nic.
'config': { # config is only honored for bridge, vlan, and vxlan types
'dhcp': bool,
'cidr': static_ip # ip/mask
'gateway': gateway
'dns': [dns]
}
}
:return:
"""
args = {
'container': container,
'nic': nic
}
self._nic_add.check(args)
return self._client.json('corex.nic-add', args)
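    # Illustrative usage sketch (not in the original source; the container id
    # and bridge name are assumptions):
    # client.container.nic_add(101, {
    #     'type': 'bridge',
    #     'id': 'br0',
    #     'name': 'eth1',
    #     'config': {'dhcp': True},
    # })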
|
python
|
{
"resource": ""
}
|
q6986
|
ContainerManager.nic_remove
|
train
|
def nic_remove(self, container, index):
"""
Hot unplug of nic from a container
    Note: removing a nic doesn't remove the nic from the container info object; instead it sets its state
    to `destroyed`.
:param container: container ID
:param index: index of the nic as returned in the container object info (as shown by container.list())
:return:
"""
args = {
'container': container,
'index': index
}
self._nic_remove.check(args)
return self._client.json('corex.nic-remove', args)
|
python
|
{
"resource": ""
}
|
q6987
|
ContainerManager.client
|
train
|
def client(self, container):
"""
Return a client instance that is bound to that container.
    :param container: container id (for example, as returned by container.create)
    :return: Client object bound to the specified container id
"""
self._client_chk.check(container)
return ContainerClient(self._client, int(container))
|
python
|
{
"resource": ""
}
|
q6988
|
ContainerManager.backup
|
train
|
def backup(self, container, url):
"""
Backup a container to the given restic url
all restic urls are supported
:param container:
:param url: Url to restic repo
examples
(file:///path/to/restic/?password=<password>)
    :return: JSON response to the backup job (do .get() to get the snapshot ID)
"""
args = {
'container': container,
'url': url,
}
return JSONResponse(self._client.raw('corex.backup', args))
|
python
|
{
"resource": ""
}
|
q6989
|
ContainerManager.restore
|
train
|
def restore(self, url, tags=None):
"""
Full restore of a container backup. This restore method will recreate
    an exact copy of the backed-up container (including the same network setup, and other
    configurations as defined by the `create` method).
To just restore the container data, and use new configuration, use the create method instead
with the `root_url` set to `restic:<url>`
:param url: Snapshot url, the snapshot ID is passed as a url fragment
examples:
`file:///path/to/restic/repo?password=<password>#<snapshot-id>`
:param tags: this will always override the original container tags (even if not set)
:return:
"""
args = {
'url': url,
}
return JSONResponse(self._client.raw('corex.restore', args, tags=tags))
|
python
|
{
"resource": ""
}
|
q6990
|
BridgeManager.delete
|
train
|
def delete(self, bridge):
"""
Delete a bridge by name
:param bridge: bridge name
:return:
"""
args = {
'name': bridge,
}
self._bridge_chk.check(args)
return self._client.json('bridge.delete', args)
|
python
|
{
"resource": ""
}
|
q6991
|
BridgeManager.nic_add
|
train
|
def nic_add(self, bridge, nic):
"""
Attach a nic to a bridge
:param bridge: bridge name
:param nic: nic name
"""
args = {
'name': bridge,
'nic': nic,
}
self._nic_add_chk.check(args)
return self._client.json('bridge.nic-add', args)
|
python
|
{
"resource": ""
}
|
q6992
|
BridgeManager.nic_remove
|
train
|
def nic_remove(self, nic):
"""
Detach a nic from a bridge
:param nic: nic name to detach
"""
args = {
'nic': nic,
}
self._nic_remove_chk.check(args)
return self._client.json('bridge.nic-remove', args)
|
python
|
{
"resource": ""
}
|
q6993
|
BridgeManager.nic_list
|
train
|
def nic_list(self, bridge):
"""
List nics attached to bridge
:param bridge: bridge name
"""
args = {
'name': bridge,
}
self._bridge_chk.check(args)
return self._client.json('bridge.nic-list', args)
|
python
|
{
"resource": ""
}
|
q6994
|
DiskManager.list
|
train
|
def list(self):
"""
List available block devices
"""
response = self._client.raw('disk.list', {})
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to list disks: %s' % result.stderr)
if result.level != 20: # 20 is JSON output.
raise RuntimeError('invalid response type from disk.list command')
data = result.data.strip()
if data:
return json.loads(data)
else:
return {}
|
python
|
{
"resource": ""
}
|
q6995
|
DiskManager.getinfo
|
train
|
def getinfo(self, disk, part=''):
"""
Get more info about a disk or a disk partition
:param disk: (/dev/sda, /dev/sdb, etc..)
:param part: (/dev/sda1, /dev/sdb2, etc...)
:return: a dict with {"blocksize", "start", "size", and "free" sections}
"""
args = {
"disk": disk,
"part": part,
}
self._getpart_chk.check(args)
response = self._client.raw('disk.getinfo', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to get info: %s' % result.data)
if result.level != 20: # 20 is JSON output.
raise RuntimeError('invalid response type from disk.getinfo command')
data = result.data.strip()
if data:
return json.loads(data)
else:
return {}
|
python
|
{
"resource": ""
}
|
q6996
|
DiskManager.seektime
|
train
|
def seektime(self, disk):
"""
    Gives the seek latency of a disk, which is a very good indication of the disk's `type`;
    it's a very good way to verify whether the underlying disk is an SSD or an HDD.
    :param disk: disk path or name (/dev/sda, or sda)
    :return: a dict as follows {'device': '<device-path>', 'elapsed': <seek-time in us>, 'type': '<SSD or HDD>'}
"""
args = {
'disk': disk,
}
self._seektime_chk.check(args)
return self._client.json("disk.seektime", args)
|
python
|
{
"resource": ""
}
|
q6997
|
BtrfsManager.device_add
|
train
|
def device_add(self, mountpoint, *device):
"""
Add one or more devices to btrfs filesystem mounted under `mountpoint`
:param mountpoint: mount point of the btrfs system
    :param device: one or more devices to add
:return:
"""
if len(device) == 0:
return
args = {
'mountpoint': mountpoint,
'devices': device,
}
self._device_chk.check(args)
self._client.sync('btrfs.device_add', args)
|
python
|
{
"resource": ""
}
|
q6998
|
BtrfsManager.subvol_snapshot
|
train
|
def subvol_snapshot(self, source, destination, read_only=False):
"""
Take a snapshot
:param source: source path of subvol
:param destination: destination path of snapshot
:param read_only: Set read-only on the snapshot
:return:
"""
args = {
"source": source,
"destination": destination,
"read_only": read_only,
}
self._subvol_snapshot_chk.check(args)
self._client.sync('btrfs.subvol_snapshot', args)
|
python
|
{
"resource": ""
}
|
q6999
|
ZerotierManager.join
|
train
|
def join(self, network):
"""
Join a zerotier network
:param network: network id to join
:return:
"""
args = {'network': network}
self._network_chk.check(args)
response = self._client.raw('zerotier.join', args)
result = response.get()
if result.state != 'SUCCESS':
        raise RuntimeError('failed to join zerotier network: %s' % result.stderr)
|
python
|
{
"resource": ""
}
|