sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def can_route(self, endpoint, method=None, **kwargs):
    """Make sure we can route to the given endpoint or url.

    This checks for `http.get` permission (or other methods) on the ACL of
    route functions, attached via the `ACL` decorator.

    :param endpoint: A URL or endpoint to check for permission to access.
    :param method: The HTTP method to check; defaults to `'GET'`.
    :param **kwargs: The context to pass to predicates.
    """
    view = flask.current_app.view_functions.get(endpoint)
    if not view:
        # Not a known endpoint name; treat it as a URL and resolve it via
        # the current request context's URL adapter.
        # BUG FIX: `flask._request_ctx` does not exist; the request-context
        # stack is `flask._request_ctx_stack`, and URL matching is done by
        # its `url_adapter`.
        endpoint, args = flask._request_ctx_stack.top.url_adapter.match(endpoint)
        view = flask.current_app.view_functions.get(endpoint)
    if not view:
        return False
    # ACL permissions are named "http.<method>", e.g. "http.get".
    return self.can('http.' + (method or 'GET').lower(), view, **kwargs)
This checks for `http.get` permission (or other methods) on the ACL of
route functions, attached via the `ACL` decorator.
:param endpoint: A URL or endpoint to check for permission to access.
:param method: The HTTP method to check; defaults to `'GET'`.
:param **kwargs: The context to pass to predicates. | entailment |
def _sendTo(self, proto):
    """
    When sent, call the C{startProtocol} method on the virtual transport
    object.

    @see: L{vertex.ptcp.PTCP.startProtocol}
    @see: L{vertex.q2q.VirtualTransport.startProtocol}

    @param proto: the AMP protocol that this is being sent on.
    """
    # XXX This is overriding a private interface
    # First let the superclass serialize and actually send the box ...
    super(ConnectionStartBox, self)._sendTo(proto)
    # ... then start the virtual transport, now that the peer has been
    # notified about the connection.
    self.virtualTransport.startProtocol()
object.
@see: L{vertex.ptcp.PTCP.startProtocol}
@see: L{vertex.q2q.VirtualTransport.startProtocol}
@param proto: the AMP protocol that this is being sent on. | entailment |
def makeResponse(cls, objects, proto):
    """
    Serialize a response dictionary using this L{Virtual} command's schema,
    doing the same thing as L{Command.makeResponse}, but wrapping the result
    in a L{ConnectionStartBox} that carries the transport popped from
    C{objects}.

    @param objects: The dictionary of strings mapped to Python objects.
    @param proto: The AMP protocol that this command is serialized to.

    @return: A L{ConnectionStartBox} containing the serialized form of
        C{objects}.
    """
    transport = objects.pop('__transport__')
    box = ConnectionStartBox(transport)
    # XXX Using a private API
    return _objectsToStrings(objects, cls.response, box, proto)
the same thing as L{Command.makeResponse}, but additionally do
addition.
@param objects: The dictionary of strings mapped to Python objects.
@param proto: The AMP protocol that this command is serialized to.
@return: A L{ConnectionStartBox} containing the serialized form of
C{objects}. | entailment |
def get_trace_id(request):
    """Return the trace id for a request.

    Prefer the incoming ``X-B3-TraceId`` header; otherwise use a custom
    generator configured as ``zipkin.trace_id_generator``; otherwise make a
    completely random trace id.

    :param: current active pyramid request
    :returns: a 64-bit hex string
    """
    headers = request.headers
    settings = request.registry.settings
    if 'X-B3-TraceId' in headers:
        converted = _convert_signed_hex(headers['X-B3-TraceId'])
        # Tolerates 128 bit X-B3-TraceId by reading the right-most 16 hex
        # characters (as opposed to overflowing a U64 and starting a new trace).
        return converted[-16:]
    if 'zipkin.trace_id_generator' in settings:
        return _convert_signed_hex(
            settings['zipkin.trace_id_generator'](request))
    return generate_random_64bit_string()
create a custom (depending on config: `zipkin.trace_id_generator`) or a
completely random trace id.
:param: current active pyramid request
:returns: a 64-bit hex string | entailment |
def _convert_signed_hex(s):
"""Takes a signed hex string that begins with '0x' and converts it to
a 16-character string representing an unsigned hex value.
Examples:
'0xd68adf75f4cfd13' => 'd68adf75f4cfd13'
'-0x3ab5151d76fb85e1' => 'c54aeae289047a1f'
"""
if s.startswith('0x') or s.startswith('-0x'):
s = '{0:x}'.format(struct.unpack('Q', struct.pack('q', int(s, 16)))[0])
return s.zfill(16) | Takes a signed hex string that begins with '0x' and converts it to
a 16-character string representing an unsigned hex value.
Examples:
'0xd68adf75f4cfd13' => 'd68adf75f4cfd13'
'-0x3ab5151d76fb85e1' => 'c54aeae289047a1f' | entailment |
def should_not_sample_path(request):
    """Decide whether the current request path is blacklisted from sampling.

    This is checked before `should_not_sample_route` and takes precedence.

    :param: current active pyramid request
    :returns: boolean whether current request path is blacklisted.
    """
    patterns = request.registry.settings.get('zipkin.blacklisted_paths', [])
    # Only compile entries given as strings; re-compiling already-compiled
    # regexes would just waste time.
    compiled = []
    for pattern in patterns:
        if isinstance(pattern, six.string_types):
            pattern = re.compile(pattern)
        compiled.append(pattern)
    return any(regex.match(request.path) for regex in compiled)
checked previous to `should_not_sample_route` and takes precedence.
:param: current active pyramid request
:returns: boolean whether current request path is blacklisted. | entailment |
def should_not_sample_route(request):
    """Decide whether the current request route is blacklisted from sampling.

    :param: current active pyramid request
    :returns: boolean whether current request route is blacklisted.
    """
    blacklist = request.registry.settings.get('zipkin.blacklisted_routes', [])
    if not blacklist:
        # Nothing configured: no route can be blacklisted.
        return False
    mapper = request.registry.queryUtility(IRoutesMapper)
    route = mapper(request).get('route')
    return (route and route.name in blacklist)
:param: current active pyramid request
:returns: boolean whether current request route is blacklisted. | entailment |
def is_tracing(request):
    """Determine if zipkin should be tracing.

    1) Check whether the current request path is blacklisted.
    2) If not, check whether the current request route is blacklisted.
    3) If not, check if specific sampled header is present in the request.
    4) If not, use a tracing percent (default: 0.5%) to decide.

    :param request: pyramid request object
    :returns: boolean True if zipkin should be tracing
    """
    if should_not_sample_path(request) or should_not_sample_route(request):
        return False
    if 'X-B3-Sampled' in request.headers:
        return request.headers.get('X-B3-Sampled') == '1'
    tracing_percent = request.registry.settings.get(
        'zipkin.tracing_percent', DEFAULT_REQUEST_TRACING_PERCENT)
    return should_sample_as_per_zipkin_tracing_percent(tracing_percent)
1) Check whether the current request path is blacklisted.
2) If not, check whether the current request route is blacklisted.
3) If not, check if specific sampled header is present in the request.
4) If not, Use a tracing percent (default: 0.5%) to decide.
:param request: pyramid request object
:returns: boolean True if zipkin should be tracing | entailment |
def create_zipkin_attr(request):
    """Build a ZipkinAttrs object for a request.

    Attaches a `zipkin_trace_id` attribute to the request, which is then
    used throughout the tween.  A custom `zipkin.is_tracing` function from
    the pyramid registry, if present, decides whether the request is
    traced; otherwise the default `is_tracing` is used.

    :param request: pyramid request object
    :rtype: :class:`pyramid_zipkin.request_helper.ZipkinAttrs`
    """
    settings = request.registry.settings
    # Fall back to the module-level is_tracing when no custom hook is set.
    tracing_decision = settings.get('zipkin.is_tracing', is_tracing)
    is_sampled = tracing_decision(request)
    request.zipkin_trace_id = get_trace_id(request)
    headers = request.headers
    return ZipkinAttrs(
        trace_id=request.zipkin_trace_id,
        span_id=headers.get('X-B3-SpanId', generate_random_64bit_string()),
        parent_span_id=headers.get('X-B3-ParentSpanId', None),
        flags=headers.get('X-B3-Flags', '0'),
        is_sampled=is_sampled,
    )
Attaches lazy attribute `zipkin_trace_id` with request which is then used
throughout the tween.
Consumes custom is_tracing function to determine if the request is traced
if one is set in the pyramid registry.
:param request: pyramid request object
:rtype: :class:`pyramid_zipkin.request_helper.ZipkinAttrs` | entailment |
def get_binary_annotations(request, response):
    """Collect all binary annotations for a request/response pair.

    :param request: the Pyramid request object
    :param response: the Pyramid response object
    :returns: binary annotation dict of {str: str}
    """
    if request.matched_route:
        route = request.matched_route.pattern
    else:
        route = ''
    annotations = {
        'http.uri': request.path,
        'http.uri.qs': request.path_qs,
        'http.route': route,
        'response_status_code': str(response.status_code),
    }
    settings = request.registry.settings
    # Optional hook lets applications contribute extra annotations.
    if 'zipkin.set_extra_binary_annotations' in settings:
        extra = settings['zipkin.set_extra_binary_annotations'](
            request, response)
        annotations.update(extra)
    return annotations
:param request: the Pyramid request object
:param response: the Pyramid response object
:returns: binary annotation dict of {str: str} | entailment |
def dmxData(self, data: tuple):
    """
    Store DMX data normalized to exactly 512 slots: extra slots are
    truncated and missing slots are zero-filled, for legacy devices and to
    prevent errors.
    """
    normalized = list(data[:512]) + [0] * max(0, 512 - len(data))
    self._dmxData = tuple(normalized)
    # in theory this class supports dynamic length, so the next line is
    # correcting the length
    self.length = 126 + len(self._dmxData)
def make_data_packet(raw_data) -> 'DataPacket':
    """
    Convert raw byte data to a sACN DataPacket.  Note that the raw bytes
    have to come from a 2016 sACN Message.  This does not support Sync
    Addresses, Force_Sync option and DMX Start code!

    :param raw_data: raw bytes as tuple or list
    :raises TypeError: if the data is too short, or the protocol vectors do
        not match the E1.31 standard
    :return: a DataPacket with the properties set like the raw bytes
    """
    # Check if the length is sufficient
    if len(raw_data) < 126:
        raise TypeError('The length of the provided data is not long enough! Min length is 126!')
    # Check if the three Vectors are correct
    if tuple(raw_data[18:22]) != tuple(VECTOR_ROOT_E131_DATA) or \
       tuple(raw_data[40:44]) != tuple(VECTOR_E131_DATA_PACKET) or \
       raw_data[117] != VECTOR_DMP_SET_PROPERTY:  # REMEMBER: when slicing: [inclusive:exclusive]
        raise TypeError('Some of the vectors in the given raw data are not compatible to the E131 Standard!')

    # BUG FIX: the universe is a 16-bit big-endian field, so the high byte
    # must be weighted by 256 (i.e. shifted left 8 bits), not 0xFF (255).
    tmpPacket = DataPacket(cid=raw_data[22:38], sourceName=str(raw_data[44:108]),
                           universe=(raw_data[113] << 8) + raw_data[114])  # high byte first
    # NOTE(review): sourceName above applies str() to a slice (producing a
    # tuple repr) rather than decoding UTF-8 bytes -- looks suspicious but
    # is left unchanged here; confirm against DataPacket's expectations.
    tmpPacket.priority = raw_data[108]
    # SyncAddress in the future?!
    tmpPacket.sequence = raw_data[111]
    tmpPacket.option_PreviewData = bool(raw_data[112] & 0b10000000)  # use the 7th bit as preview_data
    tmpPacket.option_StreamTerminated = bool(raw_data[112] & 0b01000000)
    tmpPacket.dmxData = raw_data[126:638]
    return tmpPacket
This does not support Sync Addresses, Force_Sync option and DMX Start code!
:param raw_data: raw bytes as tuple or list
:return: a DataPacket with the properties set like the raw bytes | entailment |
def getBytes(self) -> list:
    '''Return the Root layer as a list of bytes.'''
    length = self.length - 16
    result = list(_FIRST_INDEX)
    # Flags-and-length field: the high nibble is the constant 0x7, the low
    # 12 bits carry the length.
    result.append((0x7 << 4) + (length >> 8))  # flags + length bits 8-11
    result.append(length & 0xFF)               # lower 8 bits of the length
    result.extend(self._vector)
    result.extend(self._cid)
    return result
def curate_skills_data(self, skills_data):
    """ Sync skills_data with actual skills on disk. """
    local_skills = [s for s in self.list() if s.is_local]
    default_names = [s.name for s in self.list_defaults()]
    local_names = [s.name for s in local_skills]
    known_names = [s['name'] for s in skills_data['skills']]

    # Add entries for local skills that aren't in the list yet.
    for skill in local_skills:
        if skill.name in known_names:
            continue
        if skill.name in default_names:
            origin = 'default'
        elif skill.url:
            origin = 'cli'
        else:
            origin = 'non-msm'
        skills_data['skills'].append(
            build_skill_entry(skill.name, origin, False))

    # Drop entries marked installed that no longer exist on disk.
    stale = [s for s in skills_data.get('skills', [])
             if s['name'] not in local_names and
             s['installation'] == 'installed']
    for entry in stale:
        skills_data['skills'].remove(entry)
    return skills_data
def sync_skills_data(self):
    """ Update internal skill_data_structure from disk. """
    self.skills_data = self.load_skills_data()
    # NOTE(review): when the loaded data carries an 'upgraded' marker, the
    # marker is stripped and the stored hash is deliberately left stale, so
    # a later write_skills_data() will persist the upgraded structure;
    # otherwise the hash snapshot is refreshed here.  Confirm this
    # asymmetry is intended.
    if 'upgraded' in self.skills_data:
        self.skills_data.pop('upgraded')
    else:
        self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
    """ Persist skills data to disk only if it has been modified. """
    data = data or self.skills_data
    new_hash = skills_data_hash(data)
    if new_hash != self.skills_data_hash:
        write_skills_data(data)
        self.skills_data_hash = new_hash
def install(self, param, author=None, constraints=None, origin=''):
    """Install a skill by SkillEntry, url or name.

    :param param: a SkillEntry instance, or a url/name to look up
    :param author: optional author used to disambiguate a name lookup
    :param constraints: passed through to the skill's install()
    :param origin: origin tag recorded in the skills-data entry
    :raises AlreadyInstalled: if already installed (no entry is recorded)
    :raises MsmException: on failure (a 'failed' entry is recorded)
    """
    if isinstance(param, SkillEntry):
        skill = param
    else:
        skill = self.find_skill(param, author)
    entry = build_skill_entry(skill.name, origin, skill.is_beta)
    try:
        skill.install(constraints)
        entry['installed'] = time.time()
        entry['installation'] = 'installed'
        entry['status'] = 'active'
        entry['beta'] = skill.is_beta
    except AlreadyInstalled:
        # Already present: clear entry so no duplicate is recorded, but
        # re-raise so the caller still sees the condition.
        entry = None
        raise
    except MsmException as e:
        # Record the failure details before propagating the error.
        entry['installation'] = 'failed'
        entry['status'] = 'error'
        entry['failure_message'] = repr(e)
        raise
    finally:
        # Store the entry in the list (skipped when cleared above)
        if entry:
            self.skills_data['skills'].append(entry)
def remove(self, param, author=None):
    """Remove an installed skill, given a SkillEntry, url or name."""
    if isinstance(param, SkillEntry):
        skill = param
    else:
        skill = self.find_skill(param, author)
    skill.remove()
    # Drop the matching entry from the skills-data bookkeeping.
    self.skills_data['skills'] = [
        entry for entry in self.skills_data['skills']
        if entry['name'] != skill.name
    ]
def update(self, skill=None, author=None):
    """Update all downloaded skills or one specified skill."""
    if skill is None:
        return self.update_all()
    if isinstance(skill, str):
        skill = self.find_skill(skill, author)
    entry = get_skill_entry(skill.name, self.skills_data)
    if entry:
        entry['beta'] = skill.is_beta
    if skill.update() and entry:
        # On a successful update, record when it happened.
        entry['updated'] = time.time()
def apply(self, func, skills):
    """Run a function on all skills in parallel.

    :param func: callable invoked once per skill; errors are logged,
        never propagated
    :param skills: iterable of skills to process
    :return: list of booleans, True where func succeeded
    """
    def run_item(skill):
        try:
            func(skill)
            return True
        except MsmException as e:
            LOG.error('Error running {} on {}: {}'.format(
                func.__name__, skill.name, repr(e)
            ))
            return False
        except Exception:
            # BUG FIX: this was a bare `except:` (also swallowing
            # SystemExit/KeyboardInterrupt) that fell off the end of the
            # function, yielding None instead of False.
            LOG.exception('Error running {} on {}:'.format(
                func.__name__, skill.name
            ))
            return False

    with ThreadPool(20) as tp:
        return tp.map(run_item, skills)
def install_defaults(self):
    """Install any missing default skills and update the already-local ones."""
    def install_or_update_skill(skill):
        # Skills not yet on disk are installed; local ones are updated.
        if not skill.is_local:
            self.install(skill, origin='default')
        else:
            self.update(skill)

    return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self):  # type: () -> Dict[str, List[SkillEntry]]
    """Returns {'skill_group': [SkillEntry('name')]}"""
    by_name = {skill.name: skill for skill in self.list()}
    defaults = {group: [] for group in self.SKILL_GROUPS}
    for group, names in self.repo.get_default_skill_names():
        entries = []
        for name in names:
            skill = by_name.get(name)
            if skill is not None:
                entries.append(skill)
            else:
                LOG.warning('No such default skill: ' + name)
        defaults[group] = entries
    return defaults
def list(self):
    """
    Load a list of SkillEntry objects from both local and
    remote skills

    It is necessary to load both local and remote skills at
    the same time to correctly associate local skills with the name
    in the repo and remote skills with any custom path that they
    have been downloaded to
    """
    # Refresh the skill repo; tolerate failure as long as a copy exists
    # on disk already.
    try:
        self.repo.update()
    except GitException as e:
        if not isdir(self.repo.path):
            raise
        LOG.warning('Failed to update repo: {}'.format(repr(e)))
    # Build SkillEntry objects for everything the repo knows about.
    remote_skill_list = (
        SkillEntry(
            name, SkillEntry.create_path(self.skills_dir, url, name),
            url, sha if self.versioned else '', msm=self
        )
        for name, path, url, sha in self.repo.get_skill_data()
    )
    remote_skills = {
        skill.id: skill for skill in remote_skill_list
    }
    all_skills = []
    # Pair each skill found on disk with its remote counterpart (if any);
    # matched remotes are popped so only never-downloaded ones remain.
    for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
        skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
        if skill.id in remote_skills:
            skill.attach(remote_skills.pop(skill.id))
        all_skills.append(skill)
    # Remaining remote-only skills are appended un-attached.
    all_skills += list(remote_skills.values())
    return all_skills
def find_skill(self, param, author=None, skills=None):
    # type: (str, str, List[SkillEntry]) -> SkillEntry
    """Find a skill by name or url.

    :param param: a skill name, or an http(s) git url
    :param author: optional author used to refine name matching
    :param skills: optional candidate list; defaults to self.list()
    :raises SkillNotFound: if no candidate matches well enough
    :raises MultipleSkillMatches: if several candidates match closely
    """
    if param.startswith('https://') or param.startswith('http://'):
        # URL: try to find the exact repo among known skills ...
        repo_id = SkillEntry.extract_repo_id(param)
        for skill in self.list():
            if skill.id == repo_id:
                return skill
        # ... otherwise fabricate a new entry pointing at that url.
        name = SkillEntry.extract_repo_name(param)
        path = SkillEntry.create_path(self.skills_dir, param)
        return SkillEntry(name, path, param, msm=self)
    else:
        # Fuzzy name match: score every candidate and take the best.
        skill_confs = {
            skill: skill.match(param, author)
            for skill in skills or self.list()
        }
        best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
        LOG.info('Best match ({}): {} by {}'.format(
            round(score, 2), best_skill.name, best_skill.author)
        )
        if score < 0.3:
            raise SkillNotFound(param)
        # Anything scoring within 70% of the winner counts as ambiguous,
        # unless the winner was a perfect 1.0 match.
        low_bound = (score * 0.7) if score != 1.0 else 1.0
        close_skills = [
            skill for skill, conf in skill_confs.items()
            if conf >= low_bound and skill != best_skill
        ]
        if close_skills:
            raise MultipleSkillMatches([best_skill] + close_skills)
        return best_skill
def iterchunks(data, chunksize):
    """Yield successive chunks of *data* of at most *chunksize* items.

    The final chunk may be shorter than *chunksize*.

    :param data: any sliceable sequence
    :param chunksize: positive number of items per chunk
    :raises ValueError: if chunksize is not positive (the original
        while-loop would spin forever on such input)
    """
    if chunksize <= 0:
        raise ValueError('chunksize must be positive')
    for offset in range(0, len(data), chunksize):
        yield data[offset:offset + chunksize]
def segmentAcceptable(RCV_NXT, RCV_WND, SEG_SEQ, SEG_LEN):
    """
    An acceptable segment: RFC 793 page 26.
    """
    def inWindow(seq):
        # True when seq falls inside [RCV_NXT, RCV_NXT + RCV_WND).
        return RCV_NXT <= seq < RCV_NXT + RCV_WND

    if SEG_LEN == 0:
        if RCV_WND == 0:
            # Zero-length segment into a zero window: only the exact next
            # expected sequence number is acceptable.
            return SEG_SEQ == RCV_NXT
        if RCV_WND > 0:
            return inWindow(SEG_SEQ)
    elif SEG_LEN > 0:
        if RCV_WND == 0:
            # Data cannot be accepted into a zero window.
            return False
        if RCV_WND > 0:
            # Either the first or the last octet must fall in the window.
            return inWindow(SEG_SEQ) or inWindow(SEG_SEQ + SEG_LEN - 1)
    assert 0, 'Should be impossible to get here.'
    return False
def mustRetransmit(self):
    """
    Check to see if this packet must be retransmitted until it was
    received.

    Packets which contain a connection-state changing flag (SYN or FIN) or
    a non-zero amount of data must be retransmitted.
    """
    return bool(self.syn or self.fin or self.dlen)
def ackSoon(self):
    """
    Emit an acknowledgement packet soon, coalescing repeated calls into a
    single delayed ACK.
    """
    if self._ackTimer is None:
        # No ACK pending yet: schedule one.
        def originateAck():
            self._ackTimer = None
            self.originate(ack=True)
        # NOTE(review): the initial delay is the literal 0.1 while a
        # repeated call resets to ACK_DELAY below -- confirm the two are
        # meant to be the same value.
        self._ackTimer = reactor.callLater(0.1, originateAck)
    else:
        # An ACK is already scheduled; just push it back.
        self._ackTimer.reset(ACK_DELAY)
def originate(self, data='', syn=False, ack=False, fin=False, rst=False):
    """
    Create a packet, enqueue it to be sent, and return it.

    @param data: the payload to carry in this packet.
    @param syn: whether to set the SYN flag.
    @param ack: whether to set the ACK flag.
    @param fin: whether to set the FIN flag.
    @param rst: whether to set the RST flag.

    @return: the newly created packet.
    """
    # A packet is about to go out; any pending delayed ACK can piggyback
    # on it, so cancel the timer.
    if self._ackTimer is not None:
        self._ackTimer.cancel()
        self._ackTimer = None
    if syn:
        # We really should be randomizing the ISN but until we finish the
        # implementations of the various bits of wraparound logic that were
        # started with relativeSequence
        assert self.nextSendSeqNum == 0, (
            "NSSN = " + repr(self.nextSendSeqNum))
        assert self.hostSendISN == 0
    p = PTCPPacket.create(self.hostPseudoPort,
                          self.peerPseudoPort,
                          seqNum=(self.nextSendSeqNum +
                                  self.hostSendISN) % (2**32),
                          ackNum=self.currentAckNum(),
                          data=data,
                          window=self.recvWindow,
                          syn=syn, ack=ack, fin=fin, rst=rst,
                          destination=self.peerAddressTuple)
    # do we want to enqueue this packet for retransmission?
    sl = p.segmentLength()
    self.nextSendSeqNum += sl
    if p.mustRetransmit():
        # print self, 'originating retransmittable packet', len(self.retransmissionQueue)
        if self.retransmissionQueue:
            if self.retransmissionQueue[-1].fin:
                raise AssertionError("Sending %r after FIN??!" % (p,))
        # print 'putting it on the queue'
        self.retransmissionQueue.append(p)
        # print 'and sending it later'
        self._retransmitLater()
        if not self.sendWindowRemaining: # len(self.retransmissionQueue) > 5:
            # print 'oh no my queue is too big'
            # This is a random number (5) because I ought to be summing the
            # packet lengths or something.
            self._writeBufferFull()
        else:
            # print 'my queue is still small enough', len(self.retransmissionQueue), self, self.sendWindowRemaining
            pass
    self.ptcp.sendPacket(p)
    return p
def connectionJustEstablished(self):
    """
    We sent out SYN, they acknowledged it.  Congratulations, you
    have a new baby connection.

    Builds the application-level protocol via the factory and attaches it
    to this transport; any error during setup closes the connection.
    """
    assert not self.disconnecting
    assert not self.disconnected
    try:
        p = self.factory.buildProtocol(PTCPAddress(
                self.peerAddressTuple, self.pseudoPortPair))
        p.makeConnection(self)
    except Exception:
        # BUG FIX: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt raised inside protocol setup.
        log.msg("Exception during PTCP connection setup.")
        log.err()
        self.loseConnection()
    else:
        self.protocol = p
have a new baby connection. | entailment |
def connect(self, factory, host, port, pseudoPort=1):
    """
    Attempt to establish a new connection via PTCP to the given
    remote address.

    @param factory: A L{ClientFactory} used to create an L{IProtocol}
        provider if the connection is successfully set up, or which will
        have failure callbacks invoked on it otherwise.

    @param host: The IP address of another listening PTCP port to connect
        to.
    @type host: C{str}

    @param port: The port number of that other listening PTCP port to
        connect to.
    @type port: C{int}

    @param pseudoPort: Not really implemented.  Do not pass a value for
        this parameter or things will break.

    @return: A L{PTCPConnection} instance representing the new connection,
        but you really shouldn't use this for anything.  Write a protocol!
    """
    sourcePseudoPort = genConnID() % MAX_PSEUDO_PORT
    peerAddress = (host, port)
    connection = PTCPConnection(
        sourcePseudoPort, pseudoPort, self, factory, peerAddress)
    self._connections[(pseudoPort, sourcePseudoPort, peerAddress)] = connection
    connection.machine.appActiveOpen()
    return connection
remote address.
@param factory: A L{ClientFactory} which will be used to
create an L{IProtocol} provider if the connection is
successfully set up, or which will have failure callbacks
invoked on it otherwise.
@param host: The IP address of another listening PTCP port to
connect to.
@type host: C{str}
@param port: The port number of that other listening PTCP port
to connect to.
@type port: C{int}
@param pseudoPort: Not really implemented. Do not pass a
value for this parameter or things will break.
@return: A L{PTCPConnection} instance representing the new
connection, but you really shouldn't use this for
anything. Write a protocol! | entailment |
def _finalCleanup(self):
"""
Clean up all of our connections by issuing application-level close and
stop notifications, sending hail-mary final FIN packets (which may not
reach the other end, but nevertheless can be useful) when possible.
"""
for conn in self._connections.values():
conn.releaseConnectionResources()
assert not self._connections | Clean up all of our connections by issuing application-level close and
stop notifications, sending hail-mary final FIN packets (which may not
reach the other end, but nevertheless can be useful) when possible. | entailment |
def waitForAllConnectionsToClose(self):
    """
    Wait for all currently-open connections to enter the 'CLOSED' state.

    Currently this is only usable from test fixtures.
    """
    if self._connections:
        # Stop once the last connection reports closure.
        return self._allConnectionsClosed.deferred().addBoth(self._stop)
    return self._stop()
Currently this is only usable from test fixtures. | entailment |
def _argumentForLoader(loaderClass):
    """
    Create an AMP argument for (de-)serializing instances of C{loaderClass}.

    @param loaderClass: A type object with a L{load} class method that takes
        some bytes and returns an instance of itself, and a L{dump} instance
        method that returns some bytes.

    @return: a class decorator which decorates an AMP argument class by
        replacing it with the one defined for loading and saving
        C{loaderClass} instances.
    """
    def decorator(argClass):
        # The decorated class is entirely replaced; only its name is kept.
        class LoadableArgument(String):
            def toString(self, arg):
                # Serialize via the instance's dump(); the isinstance check
                # makes a mismatched value fail loudly.
                assert isinstance(arg, loaderClass), \
                    ("%r not %r" % (arg, loaderClass))
                return String.toString(self, arg.dump())
            def fromString(self, arg):
                # Deserialize the wire bytes through loaderClass.load().
                return loaderClass.load(String.fromString(self, arg))
        LoadableArgument.__name__ = argClass.__name__
        return LoadableArgument
    return decorator
return decorator | Create an AMP argument for (de-)serializing instances of C{loaderClass}.
@param loaderClass: A type object with a L{load} class method that takes
some bytes and returns an instance of itself, and a L{dump} instance
method that returns some bytes.
@return: a class decorator which decorates an AMP argument class by
replacing it with the one defined for loading and saving C{loaderClass}
instances. | entailment |
def fromString(self, inStr):
    """
    Convert the given bytes into a C{(host, port)} tuple.

    @param inStr: bytes in the format C{host:port}
    @type inStr: L{bytes}

    @return: a C{(host, port)} tuple
    @rtype: 2-L{tuple} of L{bytes}, L{int}
    """
    hostPart, portPart = inStr.split(":")
    return (hostPart, int(portPart))
@param inStr: bytes in the format C{host:port}
@type inStr: L{bytes}
@return: a C{(host, port)} tuple
@rtype: 2-L{tuple} of L{bytes}, L{int} | entailment |
def setup_Q2Q(self, path,
              q2qPortnum=q2q.port,
              inboundTCPPortnum=q2q.port+1,
              publicIP=None
              ):
    """Set up a Q2Q service backed by an on-disk certificate/user store.

    :param path: directory passed to DirectoryCertificateAndUserStore
    :param q2qPortnum: port number for the Q2Q service itself
    :param inboundTCPPortnum: port number for inbound TCP connections
    :param publicIP: externally visible IP address, if any
    """
    store = DirectoryCertificateAndUserStore(path)
    # store.addPrivateCertificate("kazekage")
    # store.addUser("kazekage", "username", "password1234")
    # The store serves triple duty: certificate storage, portal realm and
    # credentials checker.
    self.attach(q2q.Q2QService(
        protocolFactoryFactory=IdentityAdminFactory(store).examineRequest,
        certificateStorage=store,
        portal=Portal(store, checkers=[store]),
        q2qPortnum=q2qPortnum,
        inboundTCPPortnum=inboundTCPPortnum,
        publicIP=publicIP,
        ))
def replace_macros(string, spec=None):
    """Replace all macros in given string with corresponding values.

    For example: a string '%{name}-%{version}.tar.gz' will be transformed
    to 'foo-2.0.tar.gz'.

    :param string A string containing macros that you want to be replaced
    :param spec An optional spec file. If given, definitions in that spec
    file will be used to replace macros.

    :return A string where all macros in given input are substituted as good as possible.
    """
    if spec:
        assert isinstance(spec, Spec)

    def _is_conditional(macro: str) -> bool:
        return macro.startswith("?") or macro.startswith("!")

    def _test_conditional(macro: str) -> bool:
        if macro[0] == "?":
            return True
        if macro[0] == "!":
            return False
        raise Exception("Given string is not a conditional macro")

    def _expand_attr(name: str) -> str:
        # BUG FIX: getattr() may yield None or a non-string, which would
        # make re.sub() fail (the replacement callback must return a str);
        # coerce to str and map None to the empty string.
        value = getattr(spec, name, None)
        return "" if value is None else str(value)

    def _macro_repl(match):
        macro_name = match.group(1)
        if _is_conditional(macro_name) and spec:
            # Conditional macro: "?name[:value]" or "!name[:value]".
            parts = macro_name[1:].split(sep=":", maxsplit=1)
            assert parts
            if _test_conditional(macro_name):  # ?
                if hasattr(spec, parts[0]):
                    if len(parts) == 2:
                        return parts[1]
                    return _expand_attr(parts[0])
                return ""
            else:  # !
                if not hasattr(spec, parts[0]):
                    if len(parts) == 2:
                        return parts[1]
                    # BUG FIX: previously returned
                    # getattr(spec, parts[0], None), which is always None
                    # here (hasattr is False) and crashed re.sub().
                    return ""
                return ""

        if spec:
            value = getattr(spec, macro_name, None)
            if value:
                return str(value)
        # Unknown macro: leave the original text untouched.
        return match.string[match.start():match.end()]

    # Recursively expand macros
    # Note: If macros are not defined in the spec file, this won't try to
    # expand them.
    while True:
        ret = re.sub(_macro_pattern, _macro_repl, string)
        if ret != string:
            string = ret
            continue
        return ret
For example: a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
:param string A string containing macros that you want to be replaced
:param spec An optional spec file. If given, definitions in that spec
file will be used to replace macros.
:return A string where all macros in given input are substituted as good as possible. | entailment |
def update(self, spec_obj, context, match_obj, line):
    """Update given spec object and parse context and return them again.

    :param spec_obj: An instance of Spec class
    :param context: The parse context
    :param match_obj: The re.match object
    :param line: The original line

    :return: Given updated Spec instance and parse context dictionary.
    """
    # NOTE: these asserts only guard against programmer error (all four
    # arguments must be truthy) and are stripped under `python -O`.
    assert spec_obj
    assert context
    assert match_obj
    assert line
    return self.update_impl(spec_obj, context, match_obj, line)
:param spec_obj: An instance of Spec class
:param context: The parse context
:param match_obj: The re.match object
:param line: The original line
:return: Given updated Spec instance and parse context dictionary. | entailment |
def packages_dict(self):
    """All packages in this RPM spec as a dictionary keyed by package name.

    e.g. ``git_spec.packages_dict['git-doc']``
    """
    assert self.packages
    return {package.name: package for package in self.packages}
You can access the individual packages by their package name, e.g.,
git_spec.packages_dict['git-doc'] | entailment |
def from_file(filename):
    """Create a new Spec object from a given file.

    :param filename: The path to the spec file.
    :return: A new Spec object.
    """
    spec = Spec()
    context = {"current_subpackage": None}
    with open(filename, "r", encoding="utf-8") as spec_file:
        for line in spec_file:
            spec, context = _parse(spec, context, line)
    return spec
:param filename: The path to the spec file.
:return: A new Spec object. | entailment |
def from_string(string: str):
    """Create a new Spec object from a given string.

    :param string: The contents of a spec file.
    :return: A new Spec object.
    """
    spec = Spec()
    context = {"current_subpackage": None}
    for line in string.splitlines():
        spec, context = _parse(spec, context, line)
    return spec
:param string: The contents of a spec file.
:return: A new Spec object. | entailment |
def parse_acl(acl_iter):
"""Parse a string, or list of ACE definitions, into usable ACEs."""
if isinstance(acl_iter, basestring):
acl_iter = [acl_iter]
for chunk in acl_iter:
if isinstance(chunk, basestring):
chunk = chunk.splitlines()
chunk = [re.sub(r'#.+', '', line).strip() for line in chunk]
chunk = filter(None, chunk)
else:
chunk = [chunk]
for ace in chunk:
# If this was provided as a string, then parse the permission set.
# Otherwise, use it as-is, which will result in an equality test.
if isinstance(ace, basestring):
ace = ace.split(None, 2)
state, predicate, permission_set = ace
yield parse_state(state), parse_predicate(predicate), parse_permission_set(permission_set)
else:
state, predicate, permission_set = ace
yield parse_state(state), parse_predicate(predicate), permission_set | Parse a string, or list of ACE definitions, into usable ACEs. | entailment |
def iter_object_acl(root):
"""Child-first discovery of ACEs for an object.
Walks the ACL graph via ``__acl_bases__`` and yields the ACEs parsed from
``__acl__`` on each object.
"""
for obj in iter_object_graph(root):
for ace in parse_acl(getattr(obj, '__acl__', ())):
yield ace | Child-first discovery of ACEs for an object.
Walks the ACL graph via ``__acl_bases__`` and yields the ACEs parsed from
``__acl__`` on each object. | entailment |
def get_object_context(root):
"""Depth-first discovery of authentication context for an object.
Walks the ACL graph via ``__acl_bases__`` and merges the ``__acl_context__``
attributes.
"""
context = {}
for obj in iter_object_graph(root, parents_first=True):
context.update(getattr(obj, '__acl_context__', {}))
return context | Depth-first discovery of authentication context for an object.
Walks the ACL graph via ``__acl_bases__`` and merges the ``__acl_context__``
attributes. | entailment |
def load_skills_data() -> dict:
"""Contains info on how skills should be updated"""
skills_data_file = expanduser('~/.mycroft/skills.json')
if isfile(skills_data_file):
try:
with open(skills_data_file) as f:
return json.load(f)
except json.JSONDecodeError:
return {}
else:
return {} | Contains info on how skills should be updated | entailment |
def get_skill_entry(name, skills_data) -> dict:
""" Find a skill entry in the skills_data and returns it. """
for e in skills_data.get('skills', []):
if e.get('name') == name:
return e
return {} | Find a skill entry in the skills_data and returns it. | entailment |
def is_legal_sequence(self, packet: DataPacket) -> bool:
"""
Check if the Sequence number of the DataPacket is legal.
For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf.
:param packet: the packet to check
:return: true if the sequence is legal. False if the sequence number is bad
"""
# if the sequence of the packet is smaller than the last received sequence, return false
# therefore calculate the difference between the two values:
try: # try, because self.lastSequence might not been initialized
diff = packet.sequence - self.lastSequence[packet.universe]
# if diff is between ]-20,0], return False for a bad packet sequence
if 0 >= diff > -20:
return False
except:
pass
# if the sequence is good, return True and refresh the list with the new value
self.lastSequence[packet.universe] = packet.sequence
return True | Check if the Sequence number of the DataPacket is legal.
For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf.
:param packet: the packet to check
:return: true if the sequence is legal. False if the sequence number is bad | entailment |
def is_legal_priority(self, packet: DataPacket):
"""
Check if the given packet has high enough priority for the stored values for the packet's universe.
:param packet: the packet to check
:return: returns True if the priority is good. Otherwise False
"""
# check if the packet's priority is high enough to get processed
if packet.universe not in self.callbacks.keys() or \
packet.priority < self.priorities[packet.universe][0]:
return False # return if the universe is not interesting
else:
return True | Check if the given packet has high enough priority for the stored values for the packet's universe.
:param packet: the packet to check
:return: returns True if the priority is good. Otherwise False | entailment |
def deploy(Class, name=None, uid=None, gid=None, **kw):
"""
Create an application with the give name, uid, and gid.
The application has one child service, an instance of Class
configured based on the additional keyword arguments passed.
The application is not persistable.
@param Class:
@param name:
@param uid:
@param gid:
@param kw:
@return:
"""
svc = Class(**kw)
if name is None:
name = Class.__name__
# Make it easier (possible) to find this service by name later on
svc.setName(name)
app = service.Application(name, uid=uid, gid=gid)
app.addComponent(NotPersistable(app), ignoreClass=True)
svc.setServiceParent(app)
return app | Create an application with the give name, uid, and gid.
The application has one child service, an instance of Class
configured based on the additional keyword arguments passed.
The application is not persistable.
@param Class:
@param name:
@param uid:
@param gid:
@param kw:
@return: | entailment |
def addServer(self, normalPort, sslPort, f, name):
"""
Add a TCP and an SSL server. Name them `name` and `name`+'s'.
@param normalPort:
@param sslPort:
@param f:
@param name:
"""
tcp = internet.TCPServer(normalPort, f)
tcp.setName(name)
self.servers.append(tcp)
if sslPort is not None:
ssl = internet.SSLServer(sslPort, f, contextFactory=self.sslfac)
ssl.setName(name+'s')
self.servers.append(ssl) | Add a TCP and an SSL server. Name them `name` and `name`+'s'.
@param normalPort:
@param sslPort:
@param f:
@param name: | entailment |
def _identify(self, subject):
"""
Implementation of L{Identify}.
"""
ourPrivateCert = self.service.certificateStorage.getPrivateCertificate(
str(subject)
)
ourCA = Certificate(ourPrivateCert.original)
return dict(certificate=ourCA) | Implementation of L{Identify}. | entailment |
def verifyCertificateAllowed(self,
ourAddress,
theirAddress):
"""
Check that the cert currently in use by this transport is valid to
claim that the connection offers authorization for this host speaking
for C{ourAddress}, to a host speaking for C{theirAddress}. The remote
host (the one claiming to use theirAddress) may have a certificate
which is issued for the domain for theirAddress or the full address
given in theirAddress.
This method runs B{after} cryptographic verification of the validity of
certificates, although it does not perform any cryptographic checks
itself. It depends on SSL connection handshaking - *and* the
particular certificate lookup logic which prevents spoofed Issuer
fields, to work properly. However, all it checks is the X509 names
present in the certificates matching with the application-level
security claims being made by our peer.
An example of successful verification, because both parties have
properly signed certificates for their usage from the domain they
have been issued::
our current certficate:
issuer: divmod.com
subject: glyph@divmod.com
their current certificate:
issuer: twistedmatrix.com
subject: exarkun@twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: glyph@divmod.com
theirAddress: exarkun@twistedmatrix.com
Result of verifyCertificateAllowed: None
An example of rejected verification, because domain certificates are
always B{self}-signed in Q2Q; verisign is not a trusted certificate
authority for the entire internet as with some other TLS
implementations::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: verisign.com
subject: twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: twistedmatrix.com
Result of verifyCertificateAllowed: exception VerifyError raised
Another example of successful verification, because we assume our
current certificate is under the control of this side of the
connection, so *any* claimed subject is considered acceptable::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: divmod.com
subject: glyph@twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: glyph@twistedmatrix.com
Result of verifyCertificateAllowed: None
Another example of successful verification, because the user is
claiming to be anonymous; there is also a somewhat looser
cryptographic check applied to signatures for anonymous
connections::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: @
subject: @
arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: @
Result of verifyCertificateAllowed: None
Accept anonymous connections with caution.
@param ourAddress: a L{Q2QAddress} representing the address that we are
supposed to have authority for, requested by our peer.
@param theirAddress: a L{Q2QAddress} representing the address that our
network peer claims to be communicating on behalf of. For example, if
our peer is foobar.com they may claim to be operating on behalf of any
user @foobar.com.
@raise: L{VerifyError} if the certificates do not match the
claimed addresses.
"""
# XXX TODO: Somehow, it's got to be possible for a single cluster to
# internally claim to be agents of any other host when issuing a
# CONNECT; in other words, we always implicitly trust ourselves. Also,
# we might want to issue anonymous CONNECTs over unencrypted
# connections.
# IOW: *we* can sign a certificate to be whoever, but the *peer* can
# only sign the certificate to be the peer.
# The easiest way to make this work is to issue ourselves a wildcard
# certificate.
if not self.authorized:
if theirAddress.domain == '':
# XXX TODO: document this rule, anonymous connections are
# allowed to not be authorized because they are not making any
# claims about who they are
# XXX also TODO: make it so that anonymous connections are
# disabled by default for most protocols
return True
raise VerifyError("No official negotiation has taken place.")
peerCert = Certificate.peerFromTransport(self.transport)
ourCert = self.hostCertificate
ourClaimedDomain = ourAddress.domainAddress()
theirClaimedDomain = theirAddress.domainAddress()
# Sanity check #1: did we pick the right certificate on our end?
if not ourClaimedDomain.claimedAsIssuerOf(ourCert):
raise VerifyError(
"Something has gone horribly wrong: local domain mismatch "
"claim: %s actual: %s" % (ourClaimedDomain,
ourCert.getIssuer()))
if theirClaimedDomain.claimedAsIssuerOf(peerCert):
# Their domain issued their certificate.
if (theirAddress.claimedAsSubjectOf(peerCert) or
theirClaimedDomain.claimedAsSubjectOf(peerCert)):
return
elif ourClaimedDomain.claimedAsIssuerOf(peerCert):
# *our* domain can spoof *anything*
return
elif ourAddress.claimedAsIssuerOf(peerCert):
# Neither our domain nor their domain signed this. Did *we*?
# (Useful in peer-to-peer persistent transactions where we don't
# want the server involved: exarkun@twistedmatrix.com can sign
# glyph@divmod.com's certificate).
return
raise VerifyError(
"Us: %s Them: %s "
"TheyClaimWeAre: %s TheyClaimTheyAre: %s" %
(ourCert, peerCert,
ourAddress, theirAddress)) | Check that the cert currently in use by this transport is valid to
claim that the connection offers authorization for this host speaking
for C{ourAddress}, to a host speaking for C{theirAddress}. The remote
host (the one claiming to use theirAddress) may have a certificate
which is issued for the domain for theirAddress or the full address
given in theirAddress.
This method runs B{after} cryptographic verification of the validity of
certificates, although it does not perform any cryptographic checks
itself. It depends on SSL connection handshaking - *and* the
particular certificate lookup logic which prevents spoofed Issuer
fields, to work properly. However, all it checks is the X509 names
present in the certificates matching with the application-level
security claims being made by our peer.
An example of successful verification, because both parties have
properly signed certificates for their usage from the domain they
have been issued::
our current certficate:
issuer: divmod.com
subject: glyph@divmod.com
their current certificate:
issuer: twistedmatrix.com
subject: exarkun@twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: glyph@divmod.com
theirAddress: exarkun@twistedmatrix.com
Result of verifyCertificateAllowed: None
An example of rejected verification, because domain certificates are
always B{self}-signed in Q2Q; verisign is not a trusted certificate
authority for the entire internet as with some other TLS
implementations::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: verisign.com
subject: twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: twistedmatrix.com
Result of verifyCertificateAllowed: exception VerifyError raised
Another example of successful verification, because we assume our
current certificate is under the control of this side of the
connection, so *any* claimed subject is considered acceptable::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: divmod.com
subject: glyph@twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: glyph@twistedmatrix.com
Result of verifyCertificateAllowed: None
Another example of successful verification, because the user is
claiming to be anonymous; there is also a somewhat looser
cryptographic check applied to signatures for anonymous
connections::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: @
subject: @
arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: @
Result of verifyCertificateAllowed: None
Accept anonymous connections with caution.
@param ourAddress: a L{Q2QAddress} representing the address that we are
supposed to have authority for, requested by our peer.
@param theirAddress: a L{Q2QAddress} representing the address that our
network peer claims to be communicating on behalf of. For example, if
our peer is foobar.com they may claim to be operating on behalf of any
user @foobar.com.
@raise: L{VerifyError} if the certificates do not match the
claimed addresses. | entailment |
def _listen(self, protocols, From, description):
"""
Implementation of L{Listen}.
"""
# The peer is coming from a client-side representation of the user
# described by 'From', and talking *to* a server-side representation of
# the user described by 'From'.
self.verifyCertificateAllowed(From, From)
theirCert = Certificate.peerFromTransport(self.transport)
for protocolName in protocols:
if protocolName.startswith('.'):
raise VerifyError(
"Internal protocols are for server-server use _only_: %r" %
protocolName)
key = (From, protocolName)
value = (self, theirCert, description)
log.msg("%r listening for %r" % key)
self.listeningClient.append((key, value))
self.service.listeningClients.setdefault(key, []).append(value)
return {} | Implementation of L{Listen}. | entailment |
def _inbound(self, From, to, protocol, udp_source=None):
"""
Implementation of L{Inbound}.
"""
# Verify stuff!
self.verifyCertificateAllowed(to, From)
return self.service.verifyHook(From, to, protocol
).addCallback(self._inboundimpl,
From,
to,
protocol,
udp_source).addErrback(
lambda f: f.trap(KeyError) and dict(listeners=[])) | Implementation of L{Inbound}. | entailment |
def _write(self, body, id):
"""
Respond to a WRITE command, sending some data over a virtual channel
created by VIRTUAL. The answer is simply an acknowledgement, as it is
simply meant to note that the write went through without errors.
An occurrence of I{Write} on the wire, together with the response
generated by this method, might have this apperance::
C: -Command: Write
C: -Ask: 1
C: -Length: 13
C: Id: glyph@divmod.com->radix@twistedmatrix.com:q2q-example:0
C:
C: HELLO WORLD
C:
S: -Answer: 1
S:
"""
if id not in self.connections:
raise error.ConnectionDone()
connection = self.connections[id]
connection.dataReceived(body)
return {} | Respond to a WRITE command, sending some data over a virtual channel
created by VIRTUAL. The answer is simply an acknowledgement, as it is
simply meant to note that the write went through without errors.
An occurrence of I{Write} on the wire, together with the response
generated by this method, might have this apperance::
C: -Command: Write
C: -Ask: 1
C: -Length: 13
C: Id: glyph@divmod.com->radix@twistedmatrix.com:q2q-example:0
C:
C: HELLO WORLD
C:
S: -Answer: 1
S: | entailment |
def _close(self, id):
"""
Respond to a CLOSE command, dumping some data onto the stream. As with
WRITE, this returns an empty acknowledgement.
An occurrence of I{Close} on the wire, together with the response
generated by this method, might have this apperance::
C: -Command: Close
C: -Ask: 1
C: Id: glyph@divmod.com->radix@twistedmatrix.com:q2q-example:0
C:
S: -Answer: 1
S:
"""
# The connection is removed from the mapping by connectionLost.
connection = self.connections[id]
connection.connectionLost(Failure(CONNECTION_DONE))
return {} | Respond to a CLOSE command, dumping some data onto the stream. As with
WRITE, this returns an empty acknowledgement.
An occurrence of I{Close} on the wire, together with the response
generated by this method, might have this apperance::
C: -Command: Close
C: -Ask: 1
C: Id: glyph@divmod.com->radix@twistedmatrix.com:q2q-example:0
C:
S: -Answer: 1
S: | entailment |
def _sign(self, certificate_request, password):
"""
Respond to a request to sign a CSR for a user or agent located within
our domain.
"""
if self.service.portal is None:
raise BadCertificateRequest("This agent cannot sign certificates.")
subj = certificate_request.getSubject()
sk = subj.keys()
if 'commonName' not in sk:
raise BadCertificateRequest(
"Certificate requested with bad subject: %s" % (sk,))
uandd = subj.commonName.split("@")
if len(uandd) != 2:
raise BadCertificateRequest(
"Won't sign certificates for other domains"
)
domain = uandd[1]
CS = self.service.certificateStorage
ourCert = CS.getPrivateCertificate(domain)
D = self.service.portal.login(
UsernameShadowPassword(subj.commonName, password),
self,
ivertex.IQ2QUser)
def _(ial):
(iface, aspect, logout) = ial
ser = CS.genSerial(domain)
return dict(certificate=aspect.signCertificateRequest(
certificate_request, ourCert, ser))
return D.addCallback(_) | Respond to a request to sign a CSR for a user or agent located within
our domain. | entailment |
def _secure(self, to, From, authorize):
"""
Response to a SECURE command, starting TLS when necessary, and using a
certificate identified by the I{To} header.
An occurrence of I{Secure} on the wire, together with the response
generated by this method, might have the following appearance::
C: -Command: Secure
C: -Ask: 1
C: To: divmod.com
C: From: twistedmatrix.com
C: Authorize: True
C:
Client Starts TLS here with twistedmatrix.com certificate
S: -Answer: 1
S:
Server Starts TLS here with divmod.com certificate
"""
if self.hostCertificate is not None:
raise RuntimeError("Re-encrypting already encrypted connection")
CS = self.service.certificateStorage
ourCert = CS.getPrivateCertificate(str(to.domainAddress()))
if authorize:
D = CS.getSelfSignedCertificate(str(From.domainAddress()))
else:
self.authorized = False
return {'tls_localCertificate': ourCert}
def hadCert(peerSigned):
self.authorized = True
self._cacheMeNow(From, to, authorize)
return {'tls_localCertificate': ourCert,
'tls_verifyAuthorities': [peerSigned]}
def didNotHaveCert(err):
err.trap(KeyError)
return self._retrieveRemoteCertificate(From, port)
D.addErrback(didNotHaveCert)
D.addCallback(hadCert)
return D | Response to a SECURE command, starting TLS when necessary, and using a
certificate identified by the I{To} header.
An occurrence of I{Secure} on the wire, together with the response
generated by this method, might have the following appearance::
C: -Command: Secure
C: -Ask: 1
C: To: divmod.com
C: From: twistedmatrix.com
C: Authorize: True
C:
Client Starts TLS here with twistedmatrix.com certificate
S: -Answer: 1
S:
Server Starts TLS here with divmod.com certificate | entailment |
def _retrieveRemoteCertificate(self, From, port=port):
"""
The entire conversation, starting with TCP handshake and ending at
disconnect, to retrieve a foreign domain's certificate for the first
time.
"""
CS = self.service.certificateStorage
host = str(From.domainAddress())
p = AMP()
p.wrapper = self.wrapper
f = protocol.ClientCreator(reactor, lambda: p)
connD = f.connectTCP(host, port)
def connected(proto):
dhost = From.domainAddress()
iddom = proto.callRemote(Identify, subject=dhost)
def gotCert(identifyBox):
theirCert = identifyBox['certificate']
theirIssuer = theirCert.getIssuer().commonName
theirName = theirCert.getSubject().commonName
if (theirName != str(dhost)):
raise VerifyError(
"%r claimed it was %r in IDENTIFY response"
% (theirName, dhost))
if (theirIssuer != str(dhost)):
raise VerifyError(
"self-signed %r claimed it was issued by "
"%r in IDENTIFY response" % (dhost, theirIssuer))
def storedCert(ignored):
return theirCert
return CS.storeSelfSignedCertificate(
str(dhost), theirCert).addCallback(storedCert)
def nothingify(x):
proto.transport.loseConnection()
return x
return iddom.addCallback(gotCert).addBoth(nothingify)
connD.addCallback(connected)
return connD | The entire conversation, starting with TCP handshake and ending at
disconnect, to retrieve a foreign domain's certificate for the first
time. | entailment |
def secure(self, fromAddress, toAddress,
fromCertificate, foreignCertificateAuthority=None,
authorize=True):
"""
Return a Deferred which fires True when this connection has been
secured as a channel between fromAddress (locally) and
toAddress (remotely).
Raises an error if this is not possible.
"""
if self.hostCertificate is not None:
raise RuntimeError("Re-securing already secured connection.")
def _cbSecure(response):
if foreignCertificateAuthority is not None:
self.authorized = True
return True
extra = {'tls_localCertificate': fromCertificate}
if foreignCertificateAuthority is not None:
extra['tls_verifyAuthorities'] = [foreignCertificateAuthority]
return self.callRemote(
Secure,
From=fromAddress,
to=toAddress,
authorize=authorize, **extra).addCallback(_cbSecure) | Return a Deferred which fires True when this connection has been
secured as a channel between fromAddress (locally) and
toAddress (remotely).
Raises an error if this is not possible. | entailment |
def connect(self, From, to,
protocolName, clientFactory,
chooser):
"""
Issue an INBOUND command, creating a virtual connection to the peer,
given identifying information about the endpoint to connect to, and a
protocol factory.
@param clientFactory: a *Client* ProtocolFactory instance which will
generate a protocol upon connect.
@return: a Deferred which fires with the protocol instance that was
connected, or fails with AttemptsFailed if the connection was not
possible.
"""
publicIP = self._determinePublicIP()
A = dict(From=From,
to=to,
protocol=protocolName)
if self.service.dispatcher is not None:
# Tell them exactly where they can shove it
A['udp_source'] = (publicIP,
self.service.sharedUDPPortnum)
else:
# Don't tell them because we don't know
log.msg("dispatcher unavailable when connecting")
D = self.callRemote(Inbound, **A)
def _connected(answer):
listenersD = defer.maybeDeferred(chooser, answer['listeners'])
def gotListeners(listeners):
allConnectionAttempts = []
for listener in listeners:
d = self.attemptConnectionMethods(
listener['methods'],
listener['id'],
From, to,
protocolName, clientFactory,
)
allConnectionAttempts.append(d)
return defer.DeferredList(allConnectionAttempts)
listenersD.addCallback(gotListeners)
def finishedAllAttempts(results):
succeededAny = False
failures = []
if not results:
return Failure(NoAttemptsMade(
"there was no available path for connections "
"(%r->%r/%s)" % (From, to, protocolName)))
for succeeded, result in results:
if succeeded:
succeededAny = True
randomConnection = result
break
else:
failures.append(result)
if not succeededAny:
return Failure(
AttemptsFailed(
[failure.getBriefTraceback()
for failure in failures]
)
)
# XXX TODO: this connection is really random; connectQ2Q should
# not return one of the connections it's made, put it into your
# protocol's connectionMade handler
return randomConnection
return listenersD.addCallback(finishedAllAttempts)
return D.addCallback(_connected) | Issue an INBOUND command, creating a virtual connection to the peer,
given identifying information about the endpoint to connect to, and a
protocol factory.
@param clientFactory: a *Client* ProtocolFactory instance which will
generate a protocol upon connect.
@return: a Deferred which fires with the protocol instance that was
connected, or fails with AttemptsFailed if the connection was not
possible. | entailment |
def whoami(self):
"""
Return a Deferred which fires with a 2-tuple of (dotted quad ip, port
number).
"""
def cbWhoAmI(result):
return result['address']
return self.callRemote(WhoAmI).addCallback(cbWhoAmI) | Return a Deferred which fires with a 2-tuple of (dotted quad ip, port
number). | entailment |
def requestAvatarId(self, credentials):
"""
Return the ID associated with these credentials.
@param credentials: something which implements one of the interfaces in
self.credentialInterfaces.
@return: a Deferred which will fire a string which identifies an
avatar, an empty tuple to specify an authenticated anonymous user
(provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).
@see: L{twisted.cred.credentials}
"""
username, domain = credentials.username.split("@")
key = self.users.key(domain, username)
if key is None:
return defer.fail(UnauthorizedLogin())
def _cbPasswordChecked(passwordIsCorrect):
if passwordIsCorrect:
return username + '@' + domain
else:
raise UnauthorizedLogin()
return defer.maybeDeferred(credentials.checkPassword,
key).addCallback(_cbPasswordChecked) | Return the ID associated with these credentials.
@param credentials: something which implements one of the interfaces in
self.credentialInterfaces.
@return: a Deferred which will fire a string which identifies an
avatar, an empty tuple to specify an authenticated anonymous user
(provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).
@see: L{twisted.cred.credentials} | entailment |
def addPrivateCertificate(self, subjectName, existingCertificate=None):
"""
Add a PrivateCertificate object to this store for this subjectName.
If existingCertificate is None, add a new self-signed certificate.
"""
if existingCertificate is None:
assert '@' not in subjectName, "Don't self-sign user certs!"
mainDN = DistinguishedName(commonName=subjectName)
mainKey = KeyPair.generate()
mainCertReq = mainKey.certificateRequest(mainDN)
mainCertData = mainKey.signCertificateRequest(
mainDN, mainCertReq,
lambda dn: True,
self.genSerial(subjectName)
)
mainCert = mainKey.newCertificate(mainCertData)
else:
mainCert = existingCertificate
self.localStore[subjectName] = mainCert | Add a PrivateCertificate object to this store for this subjectName.
If existingCertificate is None, add a new self-signed certificate. | entailment |
def iterconnections(self):
"""
Iterator of all connections associated with this service,
whether cached or not. For testing purposes only.
"""
return itertools.chain(
self.secureConnectionCache.cachedConnections.itervalues(),
iter(self.subConnections),
(self.dispatcher or ()) and self.dispatcher.iterconnections()) | Iterator of all connections associated with this service,
whether cached or not. For testing purposes only. | entailment |
def listenQ2Q(self, fromAddress, protocolsToFactories, serverDescription):
"""
Right now this is really only useful in the client implementation,
since it is transient. protocolFactoryFactory is used for persistent
listeners.
"""
myDomain = fromAddress.domainAddress()
D = self.getSecureConnection(fromAddress, myDomain)
def _secured(proto):
lfm = self.localFactoriesMapping
def startup(listenResult):
for protocol, factory in protocolsToFactories.iteritems():
key = (fromAddress, protocol)
if key not in lfm:
lfm[key] = []
lfm[key].append((factory, serverDescription))
factory.doStart()
def shutdown():
for protocol, factory in protocolsToFactories.iteritems():
lfm[fromAddress, protocol].remove(
(factory, serverDescription))
factory.doStop()
proto.notifyOnConnectionLost(shutdown)
return listenResult
if self.dispatcher is not None:
gp = proto.transport.getPeer()
udpAddress = (gp.host, gp.port)
pubUDPDeferred = self._retrievePublicUDPPortNumber(udpAddress)
else:
pubUDPDeferred = defer.succeed(None)
def _gotPubUDPPort(publicAddress):
self._publicUDPAddress = publicAddress
return proto.listen(fromAddress, protocolsToFactories.keys(),
serverDescription).addCallback(startup)
pubUDPDeferred.addCallback(_gotPubUDPPort)
return pubUDPDeferred
D.addCallback(_secured)
return D | Right now this is really only useful in the client implementation,
since it is transient. protocolFactoryFactory is used for persistent
listeners. | entailment |
def requestCertificateForAddress(self, fromAddress, sharedSecret):
"""
Connect to the authoritative server for the domain part of the given
address and obtain a certificate signed by the root certificate for
that domain, then store that certificate in my local certificate
storage.
@param fromAddress: an address that this service is authorized to use,
and should store a separate private certificate for.
@param sharedSecret: a str that represents a secret shared between the
user of this service and their account on the server running on the
domain part of the fromAddress.
@return: a Deferred which fires None when the certificate has been
successfully retrieved, and errbacks if it cannot be retrieved.
"""
kp = KeyPair.generate()
subject = DistinguishedName(commonName=str(fromAddress))
reqobj = kp.requestObject(subject)
# Create worthless, self-signed certificate for the moment, it will be
# replaced later.
# attemptAddress = q2q.Q2QAddress(fromAddress.domain,
# fromAddress.resource + '+attempt')
# fakeSubj = DistinguishedName(commonName=str(attemptAddress))
fakereq = kp.requestObject(subject)
ssigned = kp.signRequestObject(subject, fakereq, 1)
certpair = PrivateCertificate.fromCertificateAndKeyPair
fakecert = certpair(ssigned, kp)
apc = self.certificateStorage.addPrivateCertificate
gettingSecureConnection = self.getSecureConnection(
fromAddress, fromAddress.domainAddress(), authorize=False,
usePrivateCertificate=fakecert,
)
def gotSecureConnection(secured):
return secured.callRemote(
Sign,
certificate_request=reqobj,
password=sharedSecret)
gettingSecureConnection.addCallback(gotSecureConnection)
def gotSignResponse(signResponse):
cert = signResponse['certificate']
privcert = certpair(cert, kp)
apc(str(fromAddress), privcert)
return signResponse
return gettingSecureConnection.addCallback(gotSignResponse) | Connect to the authoritative server for the domain part of the given
address and obtain a certificate signed by the root certificate for
that domain, then store that certificate in my local certificate
storage.
@param fromAddress: an address that this service is authorized to use,
and should store a separate private certificate for.
@param sharedSecret: a str that represents a secret shared between the
user of this service and their account on the server running on the
domain part of the fromAddress.
@return: a Deferred which fires None when the certificate has been
successfully retrieved, and errbacks if it cannot be retrieved. | entailment |
def mapListener(self, to, From, protocolName, protocolFactory,
isClient=False):
"""
Returns 2-tuple of (expiryTime, listenerID)
"""
listenerID = self._nextConnectionID(From, to)
call = reactor.callLater(120,
self.unmapListener,
listenerID)
expires = datetime.datetime(*time.localtime(call.getTime())[:7])
self.inboundConnections[listenerID] = (
_ConnectionWaiter(
From, to, protocolName, protocolFactory, isClient
),
call
)
return expires, listenerID | Returns 2-tuple of (expiryTime, listenerID) | entailment |
def lookupListener(self, listenID):
"""
(internal)
Retrieve a waiting connection by its connection identifier, passing in
the transport to be used to connect the waiting protocol factory to.
"""
if listenID in self.inboundConnections:
# Make the connection?
cwait, call = self.inboundConnections.pop(listenID)
# _ConnectionWaiter instance
call.cancel()
return cwait | (internal)
Retrieve a waiting connection by its connection identifier, passing in
the transport to be used to connect the waiting protocol factory to. | entailment |
def getLocalFactories(self, From, to, protocolName):
"""
Returns a list of 2-tuples of (protocolFactory, description) to handle
this from/to/protocolName
@param From:
@param to:
@param protocolName:
@return:
"""
result = []
x = self.localFactoriesMapping.get((to, protocolName), ())
result.extend(x)
y = self.protocolFactoryFactory(From, to, protocolName)
result.extend(y)
return result | Returns a list of 2-tuples of (protocolFactory, description) to handle
this from/to/protocolName
@param From:
@param to:
@param protocolName:
@return: | entailment |
def connectQ2Q(self, fromAddress, toAddress, protocolName, protocolFactory,
usePrivateCertificate=None, fakeFromDomain=None,
chooser=None):
"""
Connect a named protocol factory from a resource@domain to a
resource@domain.
This is analogous to something like connectTCP, in that it creates a
connection-oriented transport for each connection, except instead of
specifying your credentials with an application-level (username,
password) and your endpoint with a framework-level (host, port), you
specify both at once, in the form of your ID (user@my-domain), their ID
(user@their-domain) and the desired protocol. This provides several
useful features:
- All connections are automatically authenticated via SSL
certificates, although not authorized for any particular
activities, based on their transport interface rather than having
to have protocol logic to authenticate.
- User-meaningful protocol nicknames are attached to
implementations of protocol logic, rather than arbitrary
numbering.
- Endpoints can specify a variety of transport mechanisms
transparently to the application: for example, you might be
connecting to an authorized user-agent on the user's server or to
the user directly using a NAT-circumvention handshake. All the
application has to know is that it wants to establish a TCP-like
connection.
XXX Really, really should return an IConnector implementor for symmetry
with other connection-oriented transport APIs, but currently does not.
The 'resource' parameters are so named (rather than beginning with
'user', for example) because they are sometimes used to refer to
abstract entities or roles, such as 'payments', or groups of users
(communities) but generally the convention is to document them as
individual users for simplicity's sake.
The parameters are described as if Alice <alice@divmod.com> were trying
to connect to Bob <bob@notdivmod.com> to transfer a file over HTTP.
@param fromAddress: The address of the connecting user: in this case,
Q2QAddress("divmod.com", "alice")
@param toAddress: The address of the user connected to: in this case,
Q2QAddress("notdivmod.com", "bob")
@param protocolName: The name of the protocol, by convention observing
similar names to http://www.iana.org/assignments/port-numbers when
appropriate. In this case, 'http'.
@param protocolFactory: An implementation of
L{twisted.internet.interfaces.IProtocolFactory}
@param usePrivateCertificate: Use a different private certificate for
initiating the 'secure' call. Mostly for testing different invalid
certificate attacks.
@param fakeFromDomain: This domain name will be used for an argument to
the 'connect' command, but NOT as an argument to the SECURE command.
This is to test a particular kind of invalid cert attack.
@param chooser: a function taking a list of connection-describing
objects and returning another list. Those items in the remaining list
will be attempted as connections and buildProtocol called on the client
factory. May return a Deferred.
@default chooser: C{lambda x: x and [x[0]]}
@return:
"""
if chooser is None:
chooser = lambda x: x and [x[0]]
def onSecureConnection(protocol):
if fakeFromDomain:
connectFromAddress = Q2QAddress(
fakeFromDomain,
toAddress.resource
)
else:
connectFromAddress = fromAddress
return protocol.connect(connectFromAddress, toAddress,
protocolName, protocolFactory,
chooser)
def onSecureConnectionFailure(reason):
protocolFactory.clientConnectionFailed(None, reason)
return reason
return self.getSecureConnection(
fromAddress, toAddress,
port, usePrivateCertificate).addCallback(
onSecureConnection).addErrback(onSecureConnectionFailure) | Connect a named protocol factory from a resource@domain to a
resource@domain.
This is analogous to something like connectTCP, in that it creates a
connection-oriented transport for each connection, except instead of
specifying your credentials with an application-level (username,
password) and your endpoint with a framework-level (host, port), you
specify both at once, in the form of your ID (user@my-domain), their ID
(user@their-domain) and the desired protocol. This provides several
useful features:
- All connections are automatically authenticated via SSL
certificates, although not authorized for any particular
activities, based on their transport interface rather than having
to have protocol logic to authenticate.
- User-meaningful protocol nicknames are attached to
implementations of protocol logic, rather than arbitrary
numbering.
- Endpoints can specify a variety of transport mechanisms
transparently to the application: for example, you might be
connecting to an authorized user-agent on the user's server or to
the user directly using a NAT-circumvention handshake. All the
application has to know is that it wants to establish a TCP-like
connection.
XXX Really, really should return an IConnector implementor for symmetry
with other connection-oriented transport APIs, but currently does not.
The 'resource' parameters are so named (rather than beginning with
'user', for example) because they are sometimes used to refer to
abstract entities or roles, such as 'payments', or groups of users
(communities) but generally the convention is to document them as
individual users for simplicity's sake.
The parameters are described as if Alice <alice@divmod.com> were trying
to connect to Bob <bob@notdivmod.com> to transfer a file over HTTP.
@param fromAddress: The address of the connecting user: in this case,
Q2QAddress("divmod.com", "alice")
@param toAddress: The address of the user connected to: in this case,
Q2QAddress("notdivmod.com", "bob")
@param protocolName: The name of the protocol, by convention observing
similar names to http://www.iana.org/assignments/port-numbers when
appropriate. In this case, 'http'.
@param protocolFactory: An implementation of
L{twisted.internet.interfaces.IProtocolFactory}
@param usePrivateCertificate: Use a different private certificate for
initiating the 'secure' call. Mostly for testing different invalid
certificate attacks.
@param fakeFromDomain: This domain name will be used for an argument to
the 'connect' command, but NOT as an argument to the SECURE command.
This is to test a particular kind of invalid cert attack.
@param chooser: a function taking a list of connection-describing
objects and returning another list. Those items in the remaining list
will be attempted as connections and buildProtocol called on the client
factory. May return a Deferred.
@default chooser: C{lambda x: x and [x[0]]}
@return: | entailment |
def getSecureConnection(self, fromAddress, toAddress, port=port,
usePrivateCertificate=None,
authorize=True):
"""
Establish or retrieve an already-established L{Q2Q}-protocol
connection from C{fromAddress} to I{the q2q proxy provider} for
C{toAddress}; i.e. the entity responsible for the I{domain part only}
of C{toAddress}.
The connection is "from" C{fromAddress} in the sense that it will use a
certificate and private key associated with that address to
authenticate itself.
For example, if we want to connect from C{foo@bar.com} to
C{baz@qux.com}, this will establish a connection to C{qux.com}.
@param fromAddress: The address of the party represented by the local
host.
@type fromAddress: L{Q2QAddress}
@param toAddress: The address of the party whose proxy we are trying to
connect to. The domain part of this address is the DNS name to
connect to, and also the source (in our local certificate store, or
via a TOFU DNS lookup) of the certificate authority to use to
verify the connection.
@type toAddress: L{Q2QAddress}
@param port: The TCP port number on which to make the outgoing
connection.
@type port: L{int}
@param usePrivateCertificate:
@param authorize:
@return: A L{Deferred} firing with a connected L{Q2Q} where the peer is
the I{domain part} of the given C{toAddress}.
"""
# Secure connections using users as clients will have to be established
# using the 'secure' method differently than this does: we are ONLY
# capable of connecting to other domains (supernodes)
toDomain = toAddress.domainAddress()
resolveme = reactor.resolve(str(toDomain))
def cb(toIPAddress, authorize=authorize):
GPS = self.certificateStorage.getPrivateCertificate
if usePrivateCertificate:
ourCert = usePrivateCertificate
cacheFrom = fromAddress
log.msg(
'Using fakie private cert:',
fromAddress,
ourCert,
cacheFrom
)
elif fromAddress.domain == '':
assert all(
(fromAddress.resource == '',
"No domain means anonymous, %r" % (fromAddress,))
)
# We are actually anonymous, whoops!
authorize = False
# We need to create our own certificate
ourCert = KeyPair.generate().selfSignedCert(218374, CN='@')
# Feel free to cache the anonymous certificate we just made
cacheFrom = fromAddress
log.msg("Using anonymous cert for anonymous user.")
else:
try:
# Are we in fact a domain, operating on behalf of a user?
x = fromAddress.domainAddress()
ourCert = GPS(str(x))
cacheFrom = x
log.msg(
'domain on behalf of user:',
fromAddress,
ourCert,
cacheFrom
)
except KeyError:
# Nope, guess not. Are we actually that user?
try:
x = fromAddress
ourCert = GPS(str(x))
cacheFrom = x
log.msg(
'actual user:',
fromAddress,
ourCert,
cacheFrom
)
except KeyError:
# Hmm. We're not that user either. Are we trying to
# pretend to be a user from a *different* domain, to
# ourselves? (We've got to be a domain to "make
# believe", since this is effectively a clustering
# feature...)
try:
x = toDomain
ourCert = GPS(str(x))
cacheFrom = x
log.msg(
'fakie domain cert:',
fromAddress,
ourCert,
cacheFrom
)
except KeyError:
raise VerifyError(
"We tried to secure a connection "
"between %s and %s, "
"but we don't have any certificates "
"that could be used." % (fromAddress,
toAddress))
def connected(proto):
certD = self.certificateStorage.getSelfSignedCertificate(
str(toDomain))
def nocert(failure):
failure.trap(KeyError)
identD = proto.callRemote(
Identify, subject=toDomain
).addCallback(
lambda x: x['certificate'])
def storeit(certificate):
return (
self.certificateStorage.storeSelfSignedCertificate(
str(toDomain),
certificate
).addCallback(lambda x: certificate)
)
return identD.addCallback(storeit)
certD.addErrback(nocert)
def gotcert(foreignCA):
secdef = proto.secure(cacheFrom, toDomain,
ourCert, foreignCA,
authorize=authorize)
return secdef
certD.addCallback(gotcert)
return certD
return self.secureConnectionCache.connectCached(
endpoint.TCPEndpoint(toIPAddress, port),
Q2QClientFactory(self),
extraWork=connected,
extraHash=(cacheFrom, toDomain, authorize)
)
return resolveme.addCallback(cb) | Establish or retrieve an already-established L{Q2Q}-protocol
connection from C{fromAddress} to I{the q2q proxy provider} for
C{toAddress}; i.e. the entity responsible for the I{domain part only}
of C{toAddress}.
The connection is "from" C{fromAddress} in the sense that it will use a
certificate and private key associated with that address to
authenticate itself.
For example, if we want to connect from C{foo@bar.com} to
C{baz@qux.com}, this will establish a connection to C{qux.com}.
@param fromAddress: The address of the party represented by the local
host.
@type fromAddress: L{Q2QAddress}
@param toAddress: The address of the party whose proxy we are trying to
connect to. The domain part of this address is the DNS name to
connect to, and also the source (in our local certificate store, or
via a TOFU DNS lookup) of the certificate authority to use to
verify the connection.
@type toAddress: L{Q2QAddress}
@param port: The TCP port number on which to make the outgoing
connection.
@type port: L{int}
@param usePrivateCertificate:
@param authorize:
@return: A L{Deferred} firing with a connected L{Q2Q} where the peer is
the I{domain part} of the given C{toAddress}. | entailment |
def _backup_previous_version(func: Callable = None):
"""Private decorator to back up previous skill folder"""
@wraps(func)
def wrapper(self, *args, **kwargs):
self.old_path = None
if self.is_local:
self.old_path = join(gettempdir(), self.name)
if exists(self.old_path):
rmtree(self.old_path)
shutil.copytree(self.path, self.old_path)
try:
func(self, *args, **kwargs)
# Modified skill or GitError should not restore working copy
except (SkillModified, GitError, GitException):
raise
except Exception:
LOG.info('Problem performing action. Restoring skill to '
'previous state...')
if exists(self.path):
rmtree(self.path)
if self.old_path and exists(self.old_path):
shutil.copytree(self.old_path, self.path)
self.is_local = exists(self.path)
raise
return wrapper | Private decorator to back up previous skill folder | entailment |
def attach(self, remote_entry):
"""Attach a remote entry to a local entry"""
self.name = remote_entry.name
self.sha = remote_entry.sha
self.url = remote_entry.url
self.author = remote_entry.author
return self | Attach a remote entry to a local entry | entailment |
def parse_permission_set(input):
"""Lookup a permission set name in the defined permissions.
Requires a Flask app context.
"""
# Priority goes to the user's parsers.
if isinstance(input, basestring):
for func in current_acl_manager.permission_set_parsers:
res = func(input)
if res is not None:
input = res
break
if isinstance(input, basestring):
try:
return current_acl_manager.permission_sets[input]
except KeyError:
raise ValueError('unknown permission set %r' % input)
return input | Lookup a permission set name in the defined permissions.
Requires a Flask app context. | entailment |
def is_permission_in_set(perm, perm_set):
"""Test if a permission is in the given set.
:param perm: The permission object to check for.
:param perm_set: The set to check in. If a ``str``, the permission is
checked for equality. If a container, the permission is looked for in
the set. If a function, the permission is passed to the "set".
"""
if isinstance(perm_set, basestring):
return perm == perm_set
elif isinstance(perm_set, Container):
return perm in perm_set
elif isinstance(perm_set, Callable):
return perm_set(perm)
else:
raise TypeError('permission set must be a string, container, or callable') | Test if a permission is in the given set.
:param perm: The permission object to check for.
:param perm_set: The set to check in. If a ``str``, the permission is
checked for equality. If a container, the permission is looked for in
the set. If a function, the permission is passed to the "set". | entailment |
def convert_raw_data_to_universes(raw_data) -> tuple:
"""
converts the raw data to a readable universes tuple. The raw_data is scanned from index 0 and has to have
16-bit numbers with high byte first. The data is converted from the start to the beginning!
:param raw_data: the raw data to convert
:return: tuple full with 16-bit numbers
"""
if len(raw_data)%2 != 0:
raise TypeError('The given data has not a length that is a multiple of 2!')
rtrnList = []
for i in range(0, len(raw_data), 2):
rtrnList.append(two_bytes_to_int(raw_data[i], raw_data[i+1]))
return tuple(rtrnList) | converts the raw data to a readable universes tuple. The raw_data is scanned from index 0 and has to have
16-bit numbers with high byte first. The data is converted from the beginning to the end!
:param raw_data: the raw data to convert
:return: tuple full with 16-bit numbers | entailment |
def make_multiple_uni_disc_packets(cid: tuple, sourceName: str, universes: list) -> List['UniverseDiscoveryPacket']:
"""
Creates a list with universe discovery packets based on the given data. It creates automatically enough packets
for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets
"""
tmpList = []
if len(universes)%512 != 0:
num_of_packets = int(len(universes)/512)+1
else: # just get how long the list has to be. Just read and think about the if statement.
# Should be self-explaining
num_of_packets = int(len(universes)/512)
universes.sort() # E1.31 wants that the send out universes are sorted
for i in range(0, num_of_packets):
if i == num_of_packets-1:
tmpUniverses = universes[i * 512:len(universes)]
# if we are here, then the for is in the last loop
else:
tmpUniverses = universes[i * 512:(i+1) * 512]
# create new UniverseDiscoveryPacket and append it to the list. Page and lastPage are getting special values
tmpList.append(UniverseDiscoveryPacket(cid=cid, sourceName=sourceName, universes=tmpUniverses,
page=i, lastPage=num_of_packets-1))
return tmpList | Creates a list with universe discovery packets based on the given data. It creates automatically enough packets
for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets | entailment |
def paginate_queryset(self, queryset, request, view=None):
"""
adds `max_count` as a running tally of the largest table size. Used for calculating
next/previous links later
"""
result = super(MultipleModelLimitOffsetPagination, self).paginate_queryset(queryset, request, view)
try:
if self.max_count < self.count:
self.max_count = self.count
except AttributeError:
self.max_count = self.count
try:
self.total += self.count
except AttributeError:
self.total = self.count
return result | adds `max_count` as a running tally of the largest table size. Used for calculating
next/previous links later | entailment |
def format_response(self, data):
"""
replaces the `count` (the last queryset count) with the running `max_count` variable,
to ensure accurate link calculation
"""
self.count = self.max_count
return OrderedDict([
('highest_count', self.max_count),
('overall_total', self.total),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]) | replaces the `count` (the last queryset count) with the running `max_count` variable,
to ensure accurate link calculation | entailment |
def check_query_data(self, query_data):
"""
All items in a `querylist` must at least have `queryset` key and a
`serializer_class` key. Any querylist item lacking either of those keys
will raise a ValidationError
"""
for key in self.required_keys:
if key not in query_data:
raise ValidationError(
'All items in the {} querylist attribute should contain a '
'`{}` key'.format(self.__class__.__name__, key)
) | All items in a `querylist` must at least have `queryset` key and a
`serializer_class` key. Any querylist item lacking either of those keys
will raise a ValidationError | entailment |
def load_queryset(self, query_data, request, *args, **kwargs):
"""
Fetches the queryset and runs any necessary filtering, both
built-in rest_framework filters and custom filters passed into
the querylist
"""
queryset = query_data.get('queryset', [])
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
# run rest_framework filters
queryset = self.filter_queryset(queryset)
# run custom filters
filter_fn = query_data.get('filter_fn', None)
if filter_fn is not None:
queryset = filter_fn(queryset, request, *args, **kwargs)
page = self.paginate_queryset(queryset)
self.is_paginated = page is not None
return page if page is not None else queryset | Fetches the queryset and runs any necessary filtering, both
built-in rest_framework filters and custom filters passed into
the querylist | entailment |
def get_empty_results(self):
"""
Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_result_type` initializes the
`results` variable to the proper type
"""
assert self.result_type is not None, (
'{} must specify a `result_type` value or overwrite the '
'`get_empty_result` method.'.format(self.__class__.__name__)
)
return self.result_type() | Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_result_type` initializes the
`results` variable to the proper type | entailment |
def add_to_results(self, data, label, results):
"""
responsible for updating the running `results` variable with the
data from this queryset/serializer combo
"""
raise NotImplementedError(
'{} must specify how to add data to the running results tally '
'by overriding the `add_to_results` method.'.format(
self.__class__.__name__
)
) | responsible for updating the running `results` variable with the
data from this queryset/serializer combo | entailment |
def initial(self, request, *args, **kwargs):
"""
Overrides DRF's `initial` in order to set the `_sorting_field` from corresponding property in view.
Protected property is required in order to support overriding of `sorting_field` via `@property`, we do this
after original `initial` has been ran in order to make sure that view has all its properties set up.
"""
super(FlatMultipleModelMixin, self).initial(request, *args, **kwargs)
assert not (self.sorting_field and self.sorting_fields), \
'{} should either define ``sorting_field`` or ``sorting_fields`` property, not both.' \
.format(self.__class__.__name__)
if self.sorting_field:
warnings.warn(
'``sorting_field`` property is pending its deprecation. Use ``sorting_fields`` instead.',
DeprecationWarning
)
self.sorting_fields = [self.sorting_field]
self._sorting_fields = self.sorting_fields | Overrides DRF's `initial` in order to set the `_sorting_field` from corresponding property in view.
Protected property is required in order to support overriding of `sorting_field` via `@property`, we do this
after original `initial` has been run in order to make sure that view has all its properties set up. | entailment |
def add_to_results(self, data, label, results):
"""
Adds the label to the results, as needed, then appends the data
to the running results tally
"""
for datum in data:
if label is not None:
datum.update({'type': label})
results.append(datum)
return results | Adds the label to the results, as needed, then appends the data
to the running results tally | entailment |
def format_results(self, results, request):
"""
Prepares sorting parameters, and sorts results, if(as) necessary
"""
self.prepare_sorting_fields()
if self._sorting_fields:
results = self.sort_results(results)
if request.accepted_renderer.format == 'html':
# Makes the results available to the template context by transforming to a dict
results = {'data': results}
return results | Prepares sorting parameters, and sorts results, if(as) necessary | entailment |
def _sort_by(self, datum, param, path=None):
"""
Key function that is used for results sorting. This is passed as argument to `sorted()`
"""
if not path:
path = []
try:
if '__' in param:
root, new_param = param.split('__')
path.append(root)
return self._sort_by(datum[root], param=new_param, path=path)
else:
path.append(param)
data = datum[param]
if isinstance(data, list):
raise ValidationError(self._list_attribute_error.format(param))
return data
except TypeError:
raise ValidationError(self._list_attribute_error.format('.'.join(path)))
except KeyError:
raise ValidationError('Invalid sorting field: {}'.format('.'.join(path))) | Key function that is used for results sorting. This is passed as argument to `sorted()` | entailment |
def prepare_sorting_fields(self):
"""
Determine sorting direction and sorting field based on request query parameters and sorting options
of self
"""
if self.sorting_parameter_name in self.request.query_params:
# Extract sorting parameter from query string
self._sorting_fields = [
_.strip() for _ in self.request.query_params.get(self.sorting_parameter_name).split(',')
]
if self._sorting_fields:
# Create a list of sorting parameters. Each parameter is a tuple: (field:str, descending:bool)
self._sorting_fields = [
(self.sorting_fields_map.get(field.lstrip('-'), field.lstrip('-')), field[0] == '-')
for field in self._sorting_fields
] | Determine sorting direction and sorting field based on request query parameters and sorting options
of self | entailment |
def get_label(self, queryset, query_data):
"""
Gets option label for each datum. Can be used for type identification
of individual serialized objects
"""
if query_data.get('label', False):
return query_data['label']
try:
return queryset.model.__name__
except AttributeError:
return query_data['queryset'].model.__name__ | Gets option label for each datum. Can be used for type identification
of individual serialized objects | entailment |
def _extension(modpath: str) -> setuptools.Extension:
"""Make setuptools.Extension."""
return setuptools.Extension(modpath, [modpath.replace(".", "/") + ".py"]) | Make setuptools.Extension. | entailment |
def build_extension(self, ext):
"""build_extension.
:raises BuildFailed: cythonize impossible
"""
try:
build_ext.build_ext.build_extension(self, ext)
except (
distutils.errors.CCompilerError,
distutils.errors.DistutilsExecError,
distutils.errors.DistutilsPlatformError,
ValueError,
):
raise BuildFailed() | build_extension.
:raises BuildFailed: cythonize impossible | entailment |
def extend_env(conn, arguments):
"""
get the remote environment's env so we can explicitly add the path without
wiping out everything
"""
# retrieve the remote environment variables for the host
try:
result = conn.gateway.remote_exec("import os; channel.send(os.environ.copy())")
env = result.receive()
except Exception:
conn.logger.exception('failed to retrieve the remote environment variables')
env = {}
# get the $PATH and extend it (do not overwrite)
path = env.get('PATH', '')
env['PATH'] = path + '/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin'
arguments['env'] = env
if arguments.get('extend_env'):
for key, value in arguments['extend_env'].items():
arguments['env'][key] = value
arguments.pop('extend_env')
return arguments | get the remote environment's env so we can explicitly add the path without
wiping out everything | entailment |
def run(conn, command, exit=False, timeout=None, **kw):
"""
A real-time-logging implementation of a remote subprocess.Popen call where
a command is just executed on the remote end and no other handling is done.
:param conn: A connection object
:param command: The command to pass in to the remote subprocess.Popen
:param exit: If this call should close the connection at the end
:param timeout: How many seconds to wait after no remote data is received
(defaults to wait for ever)
"""
stop_on_error = kw.pop('stop_on_error', True)
if not kw.get('env'):
# get the remote environment's env so we can explicitly add
# the path without wiping out everything
kw = extend_env(conn, kw)
command = conn.cmd(command)
timeout = timeout or conn.global_timeout
conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command)))
result = conn.execute(_remote_run, cmd=command, **kw)
try:
reporting(conn, result, timeout)
except Exception:
remote_trace = traceback.format_exc()
remote_error = RemoteError(remote_trace)
if remote_error.exception_name == 'RuntimeError':
conn.logger.error(remote_error.exception_line)
else:
for tb_line in remote_trace.split('\n'):
conn.logger.error(tb_line)
if stop_on_error:
raise RuntimeError(
'Failed to execute command: %s' % ' '.join(command)
)
if exit:
conn.exit() | A real-time-logging implementation of a remote subprocess.Popen call where
a command is just executed on the remote end and no other handling is done.
:param conn: A connection object
:param command: The command to pass in to the remote subprocess.Popen
:param exit: If this call should close the connection at the end
:param timeout: How many seconds to wait after no remote data is received
(defaults to wait for ever) | entailment |
def check(conn, command, exit=False, timeout=None, **kw):
"""
Execute a remote command with ``subprocess.Popen`` but report back the
results in a tuple with three items: stdout, stderr, and exit status.
This helper function *does not* provide any logging as it is the caller's
responsibility to do so.
"""
command = conn.cmd(command)
stop_on_error = kw.pop('stop_on_error', True)
timeout = timeout or conn.global_timeout
if not kw.get('env'):
# get the remote environment's env so we can explicitly add
# the path without wiping out everything
kw = extend_env(conn, kw)
conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command)))
result = conn.execute(_remote_check, cmd=command, **kw)
response = None
try:
response = result.receive(timeout)
except Exception as err:
# the things we need to do here :(
# because execnet magic, we cannot catch this as
# `except TimeoutError`
if err.__class__.__name__ == 'TimeoutError':
msg = 'No data was received after %s seconds, disconnecting...' % timeout
conn.logger.warning(msg)
# there is no stdout, stderr, or exit code but make the exit code
# an error condition (non-zero) regardless
return [], [], -1
else:
remote_trace = traceback.format_exc()
remote_error = RemoteError(remote_trace)
if remote_error.exception_name == 'RuntimeError':
conn.logger.error(remote_error.exception_line)
else:
for tb_line in remote_trace.split('\n'):
conn.logger.error(tb_line)
if stop_on_error:
raise RuntimeError(
'Failed to execute command: %s' % ' '.join(command)
)
if exit:
conn.exit()
return response | Execute a remote command with ``subprocess.Popen`` but report back the
results in a tuple with three items: stdout, stderr, and exit status.
This helper function *does not* provide any logging as it is the caller's
responsibility to do so. | entailment |
def up_down_by_arrival(*filters, local_dir=".",
remote_dir=DEFAULT_REMOTE_DIR):
"""Monitors a local directory and a remote FlashAir directory and
generates sets of new files to be uploaded or downloaded.
Sets to upload are generated in a tuple
like (Direction.up, {...}), while sets to download
are generated in a tuple like (Direction.down, {...}). The generator yields
before each upload or download actually takes place."""
local_monitor = watch_local_files(*filters, local_dir=local_dir)
remote_monitor = watch_remote_files(*filters, remote_dir=remote_dir)
_, lfile_set = next(local_monitor)
_, rfile_set = next(remote_monitor)
_notify_sync_ready(len(lfile_set), local_dir, remote_dir)
_notify_sync_ready(len(rfile_set), remote_dir, local_dir)
processed = set()
for new_local, new_remote in zip(local_monitor, remote_monitor):
new_local, local_set = new_local
local_arrivals = {f for f in new_local if f.filename not in processed}
yield Direction.up, local_arrivals
if local_arrivals:
new_names.update(f.filename for f in local_arrivals)
_notify_sync(Direction.up, local_arrivals)
up_by_files(local_arrivals, remote_dir)
_notify_sync_ready(len(local_set), local_dir, remote_dir)
new_remote, remote_set = new_remote
remote_arrivals = {f for f in new_remote if f.filename not in processed}
yield Direction.down, remote_arrivals
if remote_arrivals:
new_names.update(f.filename for f in remote_arrivals)
_notify_sync(Direction.down, remote_arrivals)
yield Direction.down, remote_arrivals
down_by_files(remote_arrivals, local_dir)
_notify_sync_ready(len(remote_set), remote_dir, local_dir) | Monitors a local directory and a remote FlashAir directory and
generates sets of new files to be uploaded or downloaded.
Sets to upload are generated in a tuple
like (Direction.up, {...}), while sets to download
are generated in a tuple like (Direction.down, {...}). The generator yields
before each upload or download actually takes place. | entailment |
def up_by_arrival(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR):
    """Watch a local directory and generate sets of new files to be
    uploaded to FlashAir.

    Each value produced is a tuple of the form ``(Direction.up, {...})``.
    The generator yields before any upload actually takes place.
    """
    monitor = watch_local_files(*filters, local_dir=local_dir)
    _, current_files = next(monitor)  # prime the watcher
    _notify_sync_ready(len(current_files), local_dir, remote_dir)
    for arrivals, current_files in monitor:
        # Yield even when `arrivals` is empty so callers can interleave work.
        yield Direction.up, arrivals
        if not arrivals:
            continue
        _notify_sync(Direction.up, arrivals)
        up_by_files(arrivals, remote_dir)
        _notify_sync_ready(len(current_files), local_dir, remote_dir)
generates sets of new files to be uploaded to FlashAir.
Sets to upload are generated in a tuple like (Direction.up, {...}).
The generator yields before each upload actually takes place. | entailment |
def down_by_arrival(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR):
    """Watch a remote FlashAir directory and generate sets of new files
    to be downloaded from FlashAir.

    Each value produced is a tuple of the form ``(Direction.down, {...})``.
    The generator yields AFTER each download actually takes place.
    """
    monitor = watch_remote_files(*filters, remote_dir=remote_dir)
    _, current_files = next(monitor)  # prime the watcher
    _notify_sync_ready(len(current_files), remote_dir, local_dir)
    for arrivals, current_files in monitor:
        if arrivals:
            _notify_sync(Direction.down, arrivals)
            down_by_files(arrivals, local_dir)
            _notify_sync_ready(len(current_files), remote_dir, local_dir)
        # Post-download yield (possibly an empty set) — by design, unlike
        # the pre-transfer yields of the upload generators.
        yield Direction.down, arrivals
new files to be downloaded from FlashAir.
Sets to download are generated in a tuple like (Direction.down, {...}).
The generator yields AFTER each download actually takes place. | entailment |
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
    """Download the `count` most recent remote files by their
    date/time attributes."""
    remote_files = command.list_files(*filters, remote_dir=remote_dir)
    by_time = sorted(remote_files, key=lambda f: f.datetime)
    newest = by_time[-count:]
    _notify_sync(Direction.down, newest)
    # Fetch newest-first: reverse the ascending-by-time tail slice.
    down_by_files(newest[::-1], local_dir=local_dir)
def down_by_name(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
    """Download the `count` remote files whose filename attribute is
    highest in alphanumeric order."""
    remote_files = command.list_files(*filters, remote_dir=remote_dir)
    by_name = sorted(remote_files, key=lambda f: f.filename)
    greatest = by_name[-count:]
    _notify_sync(Direction.down, greatest)
    # Fetch highest-name-first: reverse the ascending-by-name tail slice.
    down_by_files(greatest[::-1], local_dir=local_dir)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.