_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q38700 | String.strlen | train | def strlen(self, name):
    """
    Return the number of bytes stored in the value of the key
    :param name: str the name of the redis key
    :return: Future()
    """
    # Queue STRLEN on the shared pipeline; result resolves on execute.
    with self.pipe as pipe:
        return pipe.strlen(self.redis_key(name)) | python | {
"resource": ""
} |
q38701 | String.setbit | train | def setbit(self, name, offset, value):
    """
    Flag the ``offset`` in the key as ``value``. Returns a boolean
    indicating the previous value of ``offset``.
    :param name: str the name of the redis key
    :param offset: int bit offset within the value
    :param value: 0 or 1, the bit to store
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.setbit(self.redis_key(name), offset, value) | python | {
"resource": ""
} |
q38702 | String.getbit | train | def getbit(self, name, offset):
    """
    Returns a boolean indicating the value of ``offset`` in key
    :param name: str the name of the redis key
    :param offset: int bit offset within the value
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.getbit(self.redis_key(name), offset) | python | {
"resource": ""
} |
q38703 | String.incr | train | def incr(self, name, amount=1):
    """
    increment the value for key by ``amount`` (1 by default)
    :param name: str the name of the redis key
    :param amount: int increment to apply
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.incr(self.redis_key(name), amount=amount) | python | {
"resource": ""
} |
q38704 | Set.sunionstore | train | def sunionstore(self, dest, keys, *args):
    """
    Store the union of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of members in the new set.
    """
    # Map every source key through the namespace prefix before issuing
    # the command; ``dest`` is prefixed the same way below.
    keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        return pipe.sunionstore(self.redis_key(dest), *keys) | python | {
"resource": ""
} |
q38705 | Set.sadd | train | def sadd(self, name, values, *args):
    """
    Add the specified members to the Set.
    :param name: str the name of the redis key
    :param values: a list of values or a simple value.
    :return: Future()
    """
    with self.pipe as pipe:
        # Encode each member with the configured value serializer.
        values = [self.valueparse.encode(v) for v in
                  self._parse_values(values, args)]
        return pipe.sadd(self.redis_key(name), *values) | python | {
"resource": ""
} |
q38706 | Set.scard | train | def scard(self, name):
    """
    How many items in the set?
    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.scard(self.redis_key(name)) | python | {
"resource": ""
} |
q38707 | Set.sismember | train | def sismember(self, name, value):
    """
    Is the provided value is in the ``Set``?
    :param name: str the name of the redis key
    :param value: str the member to test for
    :return: Future()
    """
    with self.pipe as pipe:
        # Encode so the membership test matches what sadd() stored.
        return pipe.sismember(self.redis_key(name),
                              self.valueparse.encode(value)) | python | {
"resource": ""
} |
q38708 | Set.srandmember | train | def srandmember(self, name, number=None):
    """
    Return a random member of the set.
    :param name: str the name of the redis key
    :param number: int or None; when given, redis returns a list of up to
        ``number`` members instead of a single member
    :return: Future()
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.srandmember(self.redis_key(name), number=number)
        # Decode lazily once the pipeline has executed: a scalar when
        # ``number`` is None, otherwise a list of decoded members.
        def cb():
            if number is None:
                f.set(self.valueparse.decode(res.result))
            else:
                f.set([self.valueparse.decode(v) for v in res.result])
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38709 | Set.sscan_iter | train | def sscan_iter(self, name, match=None, count=None):
    """
    Make an iterator using the SSCAN command so that the client doesn't
    need to remember the cursor position.
    ``match`` allows for filtering the keys by pattern
    ``count`` allows for hint the minimum number of returns
    :param name: str the name of the redis key
    :param match: str
    :param count: int
    """
    # Scanning requires an immediate round-trip per cursor step, which is
    # incompatible with a deferred pipeline.
    if self._pipe is not None:
        raise InvalidOperation('cannot pipeline scan operations')
    # Redis signals completion by returning cursor 0; the initial cursor
    # is the string '0' so the loop body always runs at least once.
    cursor = '0'
    while cursor != 0:
        cursor, data = self.sscan(name, cursor=cursor,
                                  match=match, count=count)
        for item in data:
            yield item | python | {
"resource": ""
} |
q38710 | List.llen | train | def llen(self, name):
    """
    Returns the length of the list.
    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.llen(self.redis_key(name)) | python | {
"resource": ""
} |
q38711 | List.lrange | train | def lrange(self, name, start, stop):
    """
    Returns a range of items.
    :param name: str the name of the redis key
    :param start: integer representing the start index of the range
    :param stop: integer representing the end index of the range
        (inclusive, per redis LRANGE semantics)
    :return: Future()
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.lrange(self.redis_key(name), start, stop)
        # Decode each raw element once the pipeline executes.
        def cb():
            f.set([self.valueparse.decode(v) for v in res.result])
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38712 | List.lpop | train | def lpop(self, name):
    """
    Pop the first object from the left.
    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.lpop(self.redis_key(name))
        # Decode the popped element (None decodes to None) on execute.
        def cb():
            f.set(self.valueparse.decode(res.result))
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38713 | List.lrem | train | def lrem(self, name, value, num=1):
    """
    Remove first occurrence of value.
    Can't use redis-py interface. It's inconstistent between
    redis.Redis and redis.StrictRedis in terms of the kwargs.
    Better to use the underlying execute_command instead.
    :param name: str the name of the redis key
    :param num: int count argument passed to LREM (1 removes the first
        occurrence from the head)
    :param value: the element to remove
    :return: Future()
    """
    with self.pipe as pipe:
        value = self.valueparse.encode(value)
        # Raw LREM avoids the redis-py Redis/StrictRedis argument-order
        # incompatibility described above.
        return pipe.execute_command('LREM', self.redis_key(name),
                                    num, value) | python | {
"resource": ""
} |
q38714 | List.ltrim | train | def ltrim(self, name, start, end):
    """
    Trim the list from start to end.
    :param name: str the name of the redis key
    :param start: int first index to keep
    :param end: int last index to keep (inclusive)
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.ltrim(self.redis_key(name), start, end) | python | {
"resource": ""
} |
q38715 | SortedSet.zadd | train | def zadd(self, name, members, score=1, nx=False,
xx=False, ch=False, incr=False):
"""
Add members in the set and assign them the score.
:param name: str the name of the redis key
:param members: a list of item or a single item
:param score: the score the assign to the item(s)
:param nx:
:param xx:
:param ch:
:param incr:
:return: Future()
"""
if nx:
_args = ['NX']
elif xx:
_args = ['XX']
else:
_args = []
if ch:
_args.append('CH')
if incr:
_args.append('INCR')
if isinstance(members, dict):
for member, score in members.items():
_args += [score, self.valueparse.encode(member)]
else:
_args += [score, self.valueparse.encode(members)]
if nx and xx:
raise InvalidOperation('cannot specify nx and xx at the same time')
with self.pipe as pipe:
return pipe.execute_command('ZADD', self.redis_key(name), *_args) | python | {
"resource": ""
} |
q38716 | SortedSet.zincrby | train | def zincrby(self, name, value, amount=1):
    """
    Increment the score of the item by ``amount``
    :param name: str the name of the redis key
    :param value: the member whose score is incremented
    :param amount: numeric increment to apply
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zincrby(self.redis_key(name),
                            value=self.valueparse.encode(value),
                            amount=amount) | python | {
"resource": ""
} |
q38717 | SortedSet.zrevrank | train | def zrevrank(self, name, value):
    """
    Returns the ranking in reverse order for the member
    :param name: str the name of the redis key
    :param value: the member of the sorted set to rank
    """
    with self.pipe as pipe:
        return pipe.zrevrank(self.redis_key(name),
                             self.valueparse.encode(value)) | python | {
"resource": ""
} |
q38718 | SortedSet.zcard | train | def zcard(self, name):
    """
    Returns the cardinality of the SortedSet.
    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zcard(self.redis_key(name)) | python | {
"resource": ""
} |
q38719 | SortedSet.zscore | train | def zscore(self, name, value):
    """
    Return the score of an element
    :param name: str the name of the redis key
    :param value: the element in the sorted set key
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zscore(self.redis_key(name),
                           self.valueparse.encode(value)) | python | {
"resource": ""
} |
q38720 | SortedSet.zremrangebyrank | train | def zremrangebyrank(self, name, min, max):
    """
    Remove a range of element between the rank ``min`` and
    ``max`` both included.
    :param name: str the name of the redis key
    :param min: int lowest rank to remove
    :param max: int highest rank to remove (inclusive)
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zremrangebyrank(self.redis_key(name), min, max) | python | {
"resource": ""
} |
q38721 | SortedSet.zremrangebyscore | train | def zremrangebyscore(self, name, min, max):
    """
    Remove a range of element by between score ``min`` and
    ``max`` both included.
    :param name: str the name of the redis key
    :param min: lowest score to remove
    :param max: highest score to remove (inclusive)
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zremrangebyscore(self.redis_key(name), min, max) | python | {
"resource": ""
} |
q38722 | SortedSet.zrank | train | def zrank(self, name, value):
    """
    Returns the rank of the element.
    :param name: str the name of the redis key
    :param value: the element in the sorted set
    """
    with self.pipe as pipe:
        value = self.valueparse.encode(value)
        return pipe.zrank(self.redis_key(name), value) | python | {
"resource": ""
} |
q38723 | SortedSet.zlexcount | train | def zlexcount(self, name, min, max):
    """
    Return the number of items in the sorted set between the
    lexicographical range ``min`` and ``max``.
    :param name: str the name of the redis key
    :param min: lexicographic lower bound or '-inf'
    :param max: lexicographic upper bound or '+inf'
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zlexcount(self.redis_key(name), min, max) | python | {
"resource": ""
} |
q38724 | SortedSet.zrevrangebylex | train | def zrevrangebylex(self, name, max, min, start=None, num=None):
"""
Return the reversed lexicographical range of values from the sorted set
between ``max`` and ``min``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
:param name: str the name of the redis key
:param max: int or '+inf'
:param min: int or '-inf'
:param start: int
:param num: int
:return: Future()
"""
with self.pipe as pipe:
f = Future()
res = pipe.zrevrangebylex(self.redis_key(name), max, min,
start=start, num=num)
def cb():
f.set([self.valueparse.decode(v) for v in res])
pipe.on_execute(cb)
return f | python | {
"resource": ""
} |
q38725 | SortedSet.zremrangebylex | train | def zremrangebylex(self, name, min, max):
    """
    Remove all elements in the sorted set between the
    lexicographical range specified by ``min`` and ``max``.
    Returns the number of elements removed.
    :param name: str the name of the redis key
    :param min: lexicographic lower bound or '-inf'
    :param max: lexicographic upper bound or '+inf'
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.zremrangebylex(self.redis_key(name), min, max) | python | {
"resource": ""
} |
q38726 | SortedSet.zscan_iter | train | def zscan_iter(self, name, match=None, count=None,
               score_cast_func=float):
    """
    Make an iterator using the ZSCAN command so that the client doesn't
    need to remember the cursor position.
    ``match`` allows for filtering the keys by pattern
    ``count`` allows for hint the minimum number of returns
    ``score_cast_func`` a callable used to cast the score return value
    """
    # Scanning needs an immediate round-trip per cursor step, which is
    # incompatible with a deferred pipeline.
    if self._pipe is not None:
        raise InvalidOperation('cannot pipeline scan operations')
    # Redis signals completion with cursor 0; starting from the string
    # '0' guarantees at least one iteration.
    cursor = '0'
    while cursor != 0:
        cursor, data = self.zscan(name, cursor=cursor, match=match,
                                  count=count,
                                  score_cast_func=score_cast_func)
        for item in data:
            yield item | python | {
"resource": ""
} |
q38727 | Hash._value_encode | train | def _value_encode(cls, member, value):
    """
    Internal method used to encode values into the hash.
    :param member: str
    :param value: multi
    :return: bytes
    """
    # Prefer a field-specific validator when one is declared for this
    # member; otherwise fall back to the generic value serializer (EAFP).
    try:
        field_validator = cls.fields[member]
    except KeyError:
        return cls.valueparse.encode(value)
    return field_validator.encode(value) | python | {
"resource": ""
} |
q38728 | Hash._value_decode | train | def _value_decode(cls, member, value):
    """
    Internal method used to decode values from redis hash
    :param member: str
    :param value: bytes
    :return: multi
    """
    # A missing field comes back from redis as None; pass it through.
    if value is None:
        return None
    # Field-specific validator wins; fall back to the generic decoder.
    try:
        field_validator = cls.fields[member]
    except KeyError:
        return cls.valueparse.decode(value)
    return field_validator.decode(value) | python | {
"resource": ""
} |
q38729 | Hash.hlen | train | def hlen(self, name):
    """
    Returns the number of elements in the Hash.
    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.hlen(self.redis_key(name)) | python | {
"resource": ""
} |
q38730 | Hash.hstrlen | train | def hstrlen(self, name, key):
    """
    Return the number of bytes stored in the value of ``key``
    within hash ``name``
    :param name: str the name of the redis key
    :param key: the member of the hash
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.hstrlen(self.redis_key(name), key) | python | {
"resource": ""
} |
q38731 | Hash.hset | train | def hset(self, name, key, value):
    """
    Set ``member`` in the Hash at ``value``.
    :param name: str the name of the redis key
    :param value: the value to store
    :param key: the member of the hash key
    :return: Future()
    """
    with self.pipe as pipe:
        # Encode the value first (it may use a per-field validator keyed
        # on the plain member name), then encode the member itself.
        value = self._value_encode(key, value)
        key = self.memberparse.encode(key)
        return pipe.hset(self.redis_key(name), key, value) | python | {
"resource": ""
} |
q38732 | Hash.hdel | train | def hdel(self, name, *keys):
    """
    Delete one or more hash field.
    :param name: str the name of the redis key
    :param keys: on or more members to remove from the key.
    :return: Future()
    """
    with self.pipe as pipe:
        # Encode members so deletes match what hset() stored.
        m_encode = self.memberparse.encode
        keys = [m_encode(m) for m in self._parse_values(keys)]
        return pipe.hdel(self.redis_key(name), *keys) | python | {
"resource": ""
} |
q38733 | Hash.hkeys | train | def hkeys(self, name):
    """
    Returns all fields name in the Hash.
    :param name: str the name of the redis key
    :return: Future
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.hkeys(self.redis_key(name))
        # Decode each raw member name once the pipeline executes.
        def cb():
            m_decode = self.memberparse.decode
            f.set([m_decode(v) for v in res.result])
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38734 | Hash.hgetall | train | def hgetall(self, name):
    """
    Returns all the fields and values in the Hash.
    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.hgetall(self.redis_key(name))
        def cb():
            data = {}
            m_decode = self.memberparse.decode
            v_decode = self._value_decode
            # Decode the member name first: the value decoder is keyed on
            # the decoded member (per-field validators).
            for k, v in res.result.items():
                k = m_decode(k)
                v = v_decode(k, v)
                data[k] = v
            f.set(data)
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38735 | Hash.hget | train | def hget(self, name, key):
    """
    Returns the value stored in the field, None if the field doesn't exist.
    :param name: str the name of the redis key
    :param key: the member of the hash
    :return: Future()
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.hget(self.redis_key(name),
                        self.memberparse.encode(key))
        # Decode with the original (unencoded) member so any per-field
        # validator is honored.
        def cb():
            f.set(self._value_decode(key, res.result))
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38736 | Hash.hexists | train | def hexists(self, name, key):
    """
    Returns ``True`` if the field exists, ``False`` otherwise.
    :param name: str the name of the redis key
    :param key: the member of the hash
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.hexists(self.redis_key(name),
                            self.memberparse.encode(key)) | python | {
"resource": ""
} |
q38737 | Hash.hincrby | train | def hincrby(self, name, key, amount=1):
    """
    Increment the value of the field.
    :param name: str the name of the redis key
    :param key: the member of the hash to increment
    :param amount: int increment to apply
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.hincrby(self.redis_key(name),
                            self.memberparse.encode(key),
                            amount) | python | {
"resource": ""
} |
q38738 | Hash.hmget | train | def hmget(self, name, keys, *args):
    """
    Returns the values stored in the fields.
    :param name: str the name of the redis key
    :param keys: one or more members of the hash to fetch
    :return: Future()
    """
    member_encode = self.memberparse.encode
    # Materialize the key list up front: the callback needs the original
    # (unencoded) members, positionally aligned with the results.
    keys = [k for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        f = Future()
        res = pipe.hmget(self.redis_key(name),
                         [member_encode(k) for k in keys])
        def cb():
            f.set([self._value_decode(keys[i], v)
                   for i, v in enumerate(res.result)])
        pipe.on_execute(cb)
        return f | python | {
"resource": ""
} |
q38739 | Hash.hmset | train | def hmset(self, name, mapping):
    """
    Sets or updates the fields with their corresponding values.
    :param name: str the name of the redis key
    :param mapping: a dict with keys and values
    :return: Future()
    """
    with self.pipe as pipe:
        m_encode = self.memberparse.encode
        # Encode each value against its plain member name (per-field
        # validators), and each member with the member serializer.
        mapping = {m_encode(k): self._value_encode(k, v)
                   for k, v in mapping.items()}
        return pipe.hmset(self.redis_key(name), mapping) | python | {
"resource": ""
} |
q38740 | Wrapper.initialize | train | def initialize(self, emt_id, emt_pass):
    """Manual initialization of the interface attributes.
    This is useful when the interface must be declare but initialized later
    on with parsed configuration values.
    Args:
        emt_id (str): ID given by the server upon registration
        emt_pass (str): Token given by the server upon registration
    """
    self._emt_id = emt_id
    self._emt_pass = emt_pass
    # Initialize modules; each sub-API holds a back-reference to this
    # wrapper so it can reuse the stored credentials.
    self.bus = BusApi(self)
    self.geo = GeoApi(self)
    self.parking = ParkingApi(self) | python | {
"resource": ""
} |
q38741 | Wrapper.request_openbus | train | def request_openbus(self, service, endpoint, **kwargs):
    """Make a request to the given endpoint of the ``openbus`` server.
    This returns the plain JSON (dict) response which can then be parsed
    using one of the implemented types.
    Args:
        service (str): Service to fetch ('bus' or 'geo').
        endpoint (str): Endpoint to send the request to.
            This string corresponds to the key in the ``ENDPOINTS`` dict.
        **kwargs: Request arguments.
    Returns:
        Obtained response (dict) or None if the endpoint was not found.
    """
    if service == 'bus':
        endpoints = ENDPOINTS_BUS
    elif service == 'geo':
        endpoints = ENDPOINTS_GEO
    else:
        # Unknown service
        return None
    if endpoint not in endpoints:
        # Unknown endpoint
        return None
    url = URL_OPENBUS + endpoints[endpoint]
    # Append credentials to request
    kwargs['idClient'] = self._emt_id
    kwargs['passKey'] = self._emt_pass
    # SSL verification fails...
    # NOTE(review): the verify=False variant was kept commented out; the
    # live call verifies certificates.
    # return requests.post(url, data=kwargs, verify=False).json()
    return requests.post(url, data=kwargs, verify=True).json() | python | {
"resource": ""
} |
q38742 | Wrapper.request_parking | train | def request_parking(self, endpoint, url_args={}, **kwargs):
"""Make a request to the given endpoint of the ``parking`` server.
This returns the plain JSON (dict) response which can then be parsed
using one of the implemented types.
Args:
endpoint (str): Endpoint to send the request to.
This string corresponds to the key in the ``ENDPOINTS`` dict.
url_args (dict): Dictionary for URL string replacements.
**kwargs: Request arguments.
Returns:
Obtained response (dict) or None if the endpoint was not found.
"""
if endpoint not in ENDPOINTS_PARKING:
# Unknown endpoint
return None
url = URL_OPENBUS + ENDPOINTS_PARKING[endpoint]
# Append additional info to URL
lang = url_args.get('lang', 'ES')
address = url_args.get('address', '')
url = url.format(
id_client=self._emt_id,
passkey=self._emt_pass,
address=address,
lang=lang
)
# This server uses TLSv1
return _parking_req.post(url, data=kwargs).json() | python | {
"resource": ""
} |
q38743 | parseSearchTerm | train | def parseSearchTerm(term):
    """
    Turn a string search query into a two-tuple of a search term and a
    dictionary of search keywords.
    Words of the form ``key:value`` become keyword entries; everything
    else is joined back into the plain term. The keyword dict is None
    when no keywords were found.
    """
    terms = []
    keywords = {}
    for word in term.split():
        # Exactly one colon marks a potential keyword token.
        if word.count(':') == 1:
            k, v = word.split(u':')
            if k and v:
                keywords[k] = v
            elif k or v:
                # Only one side present ('foo:' or ':bar'): treat the
                # non-empty side as an ordinary term.
                terms.append(k or v)
        else:
            terms.append(word)
    term = u' '.join(terms)
    if keywords:
        return term, keywords
    return term, None | python | {
"resource": ""
} |
q38744 | gtpswd | train | def gtpswd(prompt, confirmPassword):
    """
    Temporary wrapper for Twisted's getPassword until a version that supports
    customizing the 'confirm' prompt is released.
    """
    # Newer Twisted accepts confirmPrompt; older versions raise TypeError,
    # in which case fall back to the default confirm prompt.
    try:
        return util.getPassword(prompt=prompt,
                                confirmPrompt=confirmPassword,
                                confirm=True)
    except TypeError:
        return util.getPassword(prompt=prompt,
                                confirm=True) | python | {
"resource": ""
} |
q38745 | Mantissa._createCert | train | def _createCert(self, hostname, serial):
    """
    Create a self-signed X.509 certificate.
    @type hostname: L{unicode}
    @param hostname: The hostname this certificate should be valid for.
    @type serial: L{int}
    @param serial: The serial number the certificate should have.
    @rtype: L{bytes}
    @return: The serialized certificate in PEM format.
    """
    privateKey = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend())
    publicKey = privateKey.public_key()
    # Self-signed: subject and issuer are the same name.
    name = x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, hostname)])
    certificate = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        # Back-date one day to tolerate clock skew; valid for one year.
        .not_valid_before(datetime.today() - timedelta(days=1))
        .not_valid_after(datetime.today() + timedelta(days=365))
        .serial_number(serial)
        .public_key(publicKey)
        .add_extension(
            x509.BasicConstraints(ca=False, path_length=None),
            critical=True)
        .add_extension(
            x509.SubjectAlternativeName([
                x509.DNSName(hostname)]),
            critical=False)
        .add_extension(
            x509.KeyUsage(
                digital_signature=True,
                content_commitment=False,
                key_encipherment=True,
                data_encipherment=False,
                key_agreement=False,
                key_cert_sign=False,
                crl_sign=False,
                encipher_only=False,
                decipher_only=False),
            critical=True)
        .add_extension(
            x509.ExtendedKeyUsage([
                ExtendedKeyUsageOID.SERVER_AUTH]),
            critical=False)
        .sign(
            private_key=privateKey,
            algorithm=hashes.SHA256(),
            backend=default_backend()))
    # Emit key + certificate concatenated in one PEM blob, the layout
    # expected by the server.pem consumer in installSite().
    return '\n'.join([
        privateKey.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()),
        certificate.public_bytes(
            encoding=serialization.Encoding.PEM),
        ]) | python | {
"resource": ""
} |
q38746 | Mantissa.installSite | train | def installSite(self, siteStore, domain, publicURL, generateCert=True):
    """
    Create the necessary items to run an HTTP server and an SSH server.
    """
    certPath = siteStore.filesdir.child("server.pem")
    # Only generate a certificate when asked to and none exists yet.
    if generateCert and not certPath.exists():
        certPath.setContent(self._createCert(domain, genSerial()))
    # Install the base Mantissa offering.
    IOfferingTechnician(siteStore).installOffering(baseOffering)
    # Make the HTTP server baseOffering includes listen somewhere.
    site = siteStore.findUnique(SiteConfiguration)
    site.hostname = domain
    installOn(
        TCPPort(store=siteStore, factory=site, portNumber=8080),
        siteStore)
    installOn(
        SSLPort(store=siteStore, factory=site, portNumber=8443,
                certificatePath=certPath),
        siteStore)
    # Make the SSH server baseOffering includes listen somewhere.
    shell = siteStore.findUnique(SecureShellConfiguration)
    installOn(
        TCPPort(store=siteStore, factory=shell, portNumber=8022),
        siteStore)
    # Install a front page on the top level store so that the
    # developer will have something to look at when they start up
    # the server.
    fp = siteStore.findOrCreate(publicweb.FrontPage, prefixURL=u'')
    installOn(fp, siteStore) | python | {
"resource": ""
} |
q38747 | Tag.fetch | train | def fetch(self):
    """
    Fetch & return a new `Tag` object representing the tag's current state
    :rtype: Tag
    :raises DOAPIError: if the API endpoint replies with an error (e.g., if
        the tag no longer exists)
    """
    api = self.doapi_manager
    # Re-query the API and wrap the raw payload in a fresh Tag.
    return api._tag(api.request(self.url)["tag"]) | python | {
"resource": ""
} |
q38748 | Tag.add | train | def add(self, *resources):
    """
    Apply the tag to one or more resources
    :param resources: one or more `Resource` objects to which tags can be
        applied
    :return: `None`
    :raises DOAPIError: if the API endpoint replies with an error
    """
    self.doapi_manager.request(self.url + '/resources', method='POST',
                               data={"resources": _to_taggable(resources)}) | python | {
"resource": ""
} |
q38749 | Tag.act_on_droplets | train | def act_on_droplets(self, **data):
    r"""
    Perform an arbitrary action on all of the droplets to which the tag is
    applied. ``data`` will be serialized as JSON and POSTed to the proper
    API endpoint. All currently-documented actions require the POST body
    to be a JSON object containing, at a minimum, a ``"type"`` field.
    :return: a generator of `Action`\ s representing the in-progress
        operations on the droplets
    :rtype: generator of `Action`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    # The tag is addressed via the tag_name query parameter; each returned
    # action dict is wrapped in an Action object.
    return map(api._action, api.request('/v2/droplets/actions', method='POST', params={"tag_name": self.name}, data=data)["actions"]) | python | {
"resource": ""
} |
q38750 | add_badge_roles | train | def add_badge_roles(app):
    """Add ``badge`` role to your sphinx documents. It can create
    a colorful badge inline.
    """
    from docutils.nodes import inline, make_id
    from docutils.parsers.rst.roles import set_classes
    def create_badge_role(color=None):
        # Factory: builds a docutils role whose badge color is either
        # fixed (``color``) or derived from the badge text itself.
        def badge_role(name, rawtext, text, lineno, inliner,
                       options=None, content=None):
            options = options or {}
            set_classes(options)
            classes = ['badge']
            if color is None:
                # No fixed color: derive a CSS class from the text.
                classes.append('badge-' + make_id(text))
            else:
                classes.append('badge-' + color)
            if len(text) == 1:
                # Single-character badges get their own sizing class.
                classes.append('badge-one')
            options['classes'] = classes
            node = inline(rawtext, text, **options)
            return [node], []
        return badge_role
    app.add_role('badge', create_badge_role())
    app.add_role('badge-red', create_badge_role('red'))
    app.add_role('badge-blue', create_badge_role('blue'))
    app.add_role('badge-green', create_badge_role('green'))
    app.add_role('badge-yellow', create_badge_role('yellow')) | python | {
"resource": ""
} |
q38751 | TaskManager.promise | train | def promise(cls, fn, *args, **kwargs):
    """
    Used to build a task based on a callable function and the arguments.
    Kick it off and start execution of the task.
    :param fn: callable
    :param args: tuple
    :param kwargs: dict
    :return: SynchronousTask or AsynchronousTask
    """
    # Build the task via the class-level factory and start it immediately.
    task = cls.task(target=fn, args=args, kwargs=kwargs)
    task.start()
    return task | python | {
"resource": ""
} |
q38752 | _interfacesToNames | train | def _interfacesToNames(interfaces):
    """
    Convert from a list of interfaces to a unicode string of names suitable for
    storage in the database.
    @param interfaces: an iterable of Interface objects.
    @return: a unicode string, a comma-separated list of names of interfaces.
    @raise ConflictingNames: if any of the names conflict: see
    L{_checkConflictingNames}.
    """
    # The ALL_IMPLEMENTED sentinel is stored as a special marker string
    # rather than an explicit interface list.
    if interfaces is ALL_IMPLEMENTED:
        names = ALL_IMPLEMENTED_DB
    else:
        _checkConflictingNames(interfaces)
        names = u','.join(map(qual, interfaces))
    return names | python | {
"resource": ""
} |
q38753 | upgradeShare1to2 | train | def upgradeShare1to2(oldShare):
    "Upgrader from Share version 1 to version 2."
    sharedInterfaces = []
    attrs = set(oldShare.sharedAttributeNames.split(u','))
    # Recover full interfaces from the v1 attribute-name list: keep each
    # interface whose attribute set matches exactly, or everything when
    # the wildcard '*' was stored.
    for iface in implementedBy(oldShare.sharedItem.__class__):
        if set(iface) == attrs or attrs == set('*'):
            sharedInterfaces.append(iface)
    newShare = oldShare.upgradeVersion('sharing_share', 1, 2,
                                       shareID=oldShare.shareID,
                                       sharedItem=oldShare.sharedItem,
                                       sharedTo=oldShare.sharedTo,
                                       sharedInterfaces=sharedInterfaces)
    return newShare | python | {
"resource": ""
} |
q38754 | getAuthenticatedRole | train | def getAuthenticatedRole(store):
    """
    Get the base 'Authenticated' role for this store, which is the role that is
    given to every user who is explicitly identified by a non-anonymous
    username.
    """
    def tx():
        # On first creation, make Authenticated a member of Everyone so
        # authenticated users inherit public permissions.
        def addToEveryone(newAuthenticatedRole):
            newAuthenticatedRole.becomeMemberOf(getEveryoneRole(store))
            return newAuthenticatedRole
        return store.findOrCreate(Role, addToEveryone, externalID=u'Authenticated')
    # Run find-or-create atomically inside a transaction.
    return store.transact(tx) | python | {
"resource": ""
} |
q38755 | getPrimaryRole | train | def getPrimaryRole(store, primaryRoleName, createIfNotFound=False):
    """
    Get Role object corresponding to an identifier name. If the role name
    passed is the empty string, it is assumed that the user is not
    authenticated, and the 'Everybody' role is primary. If the role name
    passed is non-empty, but has no corresponding role, the 'Authenticated'
    role - which is a member of 'Everybody' - is primary. Finally, a specific
    role can be primary if one exists for the user's given credentials, that
    will automatically always be a member of 'Authenticated', and by extension,
    of 'Everybody'.
    @param primaryRoleName: a unicode string identifying the role to be
    retrieved. This corresponds to L{Role}'s externalID attribute.
    @param createIfNotFound: a boolean. If True, create a role for the given
    primary role name if no exact match is found. The default, False, will
    instead retrieve the 'nearest match' role, which can be Authenticated or
    Everybody depending on whether the user is logged in or not.
    @return: a L{Role}.
    """
    # Anonymous (empty name): the public Everyone role is primary.
    if not primaryRoleName:
        return getEveryoneRole(store)
    ff = store.findUnique(Role, Role.externalID == primaryRoleName, default=None)
    if ff is not None:
        return ff
    authRole = getAuthenticatedRole(store)
    if createIfNotFound:
        # Newly created roles are automatically members of Authenticated.
        role = Role(store=store,
                    externalID=primaryRoleName)
        role.becomeMemberOf(authRole)
        return role
    return authRole | python | {
"resource": ""
} |
q38756 | _linearize | train | def _linearize(interface):
    """
    Return a list of all the bases of a given interface in depth-first order.
    @param interface: an Interface object.
    @return: a L{list} of Interface objects, the input in all its bases, in
    subclass-to-base-class, depth-first order.
    """
    L = [interface]
    # Recurse into each base, excluding the root Interface itself.
    for baseInterface in interface.__bases__:
        if baseInterface is not Interface:
            L.extend(_linearize(baseInterface))
    return L | python | {
"resource": ""
} |
q38757 | _commonParent | train | def _commonParent(zi1, zi2):
    """
    Locate the common parent of two Interface objects.
    @param zi1: a zope Interface object.
    @param zi2: another Interface object.
    @return: the rightmost common parent of the two provided Interface objects,
    or None, if they have no common parent other than Interface itself.
    """
    # Base-to-subclass chains for both interfaces, shortest first.
    # NOTE(review): the bare ``for x in zi1, zi2`` tuple inside the
    # comprehension is Python 2 syntax.
    shorter, longer = sorted([_linearize(x)[::-1] for x in zi1, zi2],
                             key=len)
    # Walk both chains in lockstep; the last position where they agree is
    # the deepest common ancestor.
    for n in range(len(shorter)):
        if shorter[n] != longer[n]:
            if n == 0:
                return None
            return shorter[n-1]
    return shorter[-1] | python | {
"resource": ""
} |
q38758 | _checkConflictingNames | train | def _checkConflictingNames(interfaces):
    """
    Raise an exception if any of the names present in the given interfaces
    conflict with each other.
    @param interfaces: a list of Zope Interface objects.
    @return: None
    @raise ConflictingNames: if any of the attributes of the provided
    interfaces are the same, and they do not have a common base interface which
    provides that name.
    """
    # Track which interface first claimed each attribute name.
    names = {}
    for interface in interfaces:
        for name in interface:
            if name in names:
                otherInterface = names[name]
                # A shared name is fine only if both interfaces inherit it
                # from a common parent that actually defines it.
                parent = _commonParent(interface, otherInterface)
                if parent is None or name not in parent:
                    raise ConflictingNames("%s conflicts with %s over %s" % (
                        interface, otherInterface, name))
            names[name] = interface | python | {
"resource": ""
} |
q38759 | asAccessibleTo | train | def asAccessibleTo(role, query):
    """
    Return an iterable which yields the shared proxies that are available to
    the given role, from the given query.
    This method is pending deprecation, and L{Role.asAccessibleTo} should be
    preferred in new code.
    @param role: The role to retrieve L{SharedProxy}s for.
    @param query: An Axiom query describing the Items to retrieve, which this
    role can access.
    @type query: an L{iaxiom.IQuery} provider.
    """
    # Deprecated shim: warn, then delegate to the method form.
    warnings.warn(
        "Use Role.asAccessibleTo() instead of sharing.asAccessibleTo().",
        PendingDeprecationWarning,
        stacklevel=2)
    return role.asAccessibleTo(query) | python | {
"resource": ""
} |
q38760 | unShare | train | def unShare(sharedItem):
    """
    Remove all instances of this item from public or shared view.
    """
    # Delete every Share row that points at this item, revoking access
    # for all roles at once.
    sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore() | python | {
"resource": ""
} |
q38761 | randomEarlyShared | train | def randomEarlyShared(store, role):
    """
    If there are no explicitly-published public index pages to display, find a
    shared item to present to the user as first.
    """
    # Walk the role's full membership graph and return the oldest share
    # (lowest storeID) visible to any of those roles.
    for r in role.allRoles():
        share = store.findFirst(Share, Share.sharedTo == r,
                                sort=Share.storeID.ascending)
        if share is not None:
            return share.sharedItem
    raise NoSuchShare("Why, that user hasn't shared anything at all!") | python | {
"resource": ""
} |
q38762 | Role.allRoles | train | def allRoles(self, memo=None):
    """
    Identify all the roles that this role is authorized to act as.
    @param memo: used only for recursion. Do not pass this.
    @return: an iterator of all roles that this role is a member of,
    including itself.
    """
    if memo is None:
        memo = set()
    elif self in memo:
        # this is bad, but we have successfully detected and prevented the
        # only really bad symptom, an infinite loop.
        return
    memo.add(self)
    yield self
    # Recurse into every group this role belongs to, sharing ``memo`` so
    # each role is yielded at most once even in cyclic graphs.
    for groupRole in self.store.query(Role,
                                      AND(RoleRelationship.member == self,
                                          RoleRelationship.group == Role.storeID)):
        for roleRole in groupRole.allRoles(memo):
            yield roleRole | python | {
"resource": ""
} |
q38763 | Share.sharedInterfaces | train | def sharedInterfaces():
"""
This attribute is the public interface for code which wishes to discover
the list of interfaces allowed by this Share. It is a list of
Interface objects.
"""
def get(self):
if not self.sharedInterfaceNames:
return ()
if self.sharedInterfaceNames == ALL_IMPLEMENTED_DB:
I = implementedBy(self.sharedItem.__class__)
L = list(I)
T = tuple(L)
return T
else:
return tuple(map(namedAny, self.sharedInterfaceNames.split(u',')))
def set(self, newValue):
self.sharedAttributeNames = _interfacesToNames(newValue)
return get, set | python | {
"resource": ""
} |
q38764 | every_minute | train | def every_minute(dt=datetime.datetime.utcnow(), fmt=None):
"""
Just pass on the given date.
"""
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date | python | {
"resource": ""
} |
q38765 | hourly | train | def hourly(dt=datetime.datetime.utcnow(), fmt=None):
"""
Get a new datetime object every hour.
"""
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, 1, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date | python | {
"resource": ""
} |
q38766 | weekly | train | def weekly(date=datetime.date.today()):
"""
Weeks start are fixes at Monday for now.
"""
return date - datetime.timedelta(days=date.weekday()) | python | {
"resource": ""
} |
q38767 | biweekly | train | def biweekly(date=datetime.date.today()):
"""
Every two weeks.
"""
return datetime.date(date.year, date.month, 1 if date.day < 15 else 15) | python | {
"resource": ""
} |
q38768 | monthly | train | def monthly(date=datetime.date.today()):
"""
Take a date object and return the first day of the month.
"""
return datetime.date(date.year, date.month, 1) | python | {
"resource": ""
} |
q38769 | semiyearly | train | def semiyearly(date=datetime.date.today()):
"""
Twice a year.
"""
return datetime.date(date.year, 1 if date.month < 7 else 7, 1) | python | {
"resource": ""
} |
q38770 | TabularDataModel.resort | train | def resort(self, attributeID, isAscending=None):
"""Sort by one of my specified columns, identified by attributeID
"""
if isAscending is None:
isAscending = self.defaultSortAscending
newSortColumn = self.columns[attributeID]
if newSortColumn.sortAttribute() is None:
raise Unsortable('column %r has no sort attribute' % (attributeID,))
if self.currentSortColumn == newSortColumn:
# if this query is to be re-sorted on the same column, but in the
# opposite direction to our last query, then use the first item in
# the result set as the marker
if self.isAscending == isAscending:
offset = 0
else:
# otherwise use the last
offset = -1
else:
offset = 0
self.currentSortColumn = newSortColumn
self.isAscending = isAscending
self._updateResults(self._sortAttributeValue(offset), True) | python | {
"resource": ""
} |
q38771 | TabularDataModel.currentPage | train | def currentPage(self):
"""
Return a sequence of mappings of attribute IDs to column values, to
display to the user.
nextPage/prevPage will strive never to skip items whose column values
have not been returned by this method.
This is best explained by a demonstration. Let's say you have a table
viewing an item with attributes 'a' and 'b', like this:
oid | a | b
----+---+--
0 | 1 | 2
1 | 3 | 4
2 | 5 | 6
3 | 7 | 8
4 | 9 | 0
The table has 2 items per page. You call currentPage and receive a
page which contains items oid 0 and oid 1. item oid 1 is deleted.
If the next thing you do is to call nextPage, the result of currentPage
following that will be items beginning with item oid 2. This is
because although there are no longer enough items to populate a full
page from 0-1, the user has never seen item #2 on a page, so the 'next'
page from the user's point of view contains #2.
If instead, at that same point, the next thing you did was to call
currentPage, *then* nextPage and currentPage again, the first
currentPage results would contain items #0 and #2; the following
currentPage results would contain items #3 and #4. In this case, the
user *has* seen #2 already, so the user expects to see the following
item, not the same item again.
"""
self._updateResults(self._sortAttributeValue(0), equalToStart=True, refresh=True)
return self._currentResults | python | {
"resource": ""
} |
q38772 | TabularDataModel._sortAttributeValue | train | def _sortAttributeValue(self, offset):
"""
return the value of the sort attribute for the item at
'offset' in the results of the last query, otherwise None.
"""
if self._currentResults:
pageStart = (self._currentResults[offset][
self.currentSortColumn.attributeID],
self._currentResults[offset][
'__item__'].storeID)
else:
pageStart = None
return pageStart | python | {
"resource": ""
} |
q38773 | XKeyboard.open_display | train | def open_display(self):
"""Establishes connection with X server and prepares objects
necessary to retrieve and send data.
"""
self.close_display() # Properly finish previous open_display()
XkbIgnoreExtension(False)
display_name = None
major = c_int(XkbMajorVersion)
minor = c_int(XkbMinorVersion)
reason = c_int()
self._display = XkbOpenDisplay(
display_name,
None, None, byref(major), byref(minor), byref(reason))
if not self._display:
if reason.value in OPEN_DISPLAY_ERRORS:
# Assume POSIX conformance
display_name = os.getenv("DISPLAY") or "default"
raise X11Error(OPEN_DISPLAY_ERRORS[reason.value].format(
libname="xkbgroup",
used_major=XkbMajorVersion,
used_minor=XkbMinorVersion,
found_major=major.value,
found_minor=minor.value,
display_name=display_name)
+ ".")
else:
raise X11Error("Unknown error {} from XkbOpenDisplay.".format(reason.value))
self._keyboard_description = XkbGetMap(self._display, 0, XkbUseCoreKbd)
if not self._keyboard_description:
self.close_display()
raise X11Error("Failed to get keyboard description.")
# Controls mask doesn't affect the availability of xkb->ctrls->num_groups anyway
# Just use a valid value, and xkb->ctrls->num_groups will be definitely set
status = XkbGetControls(self._display, XkbAllControlsMask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_CONTROLS_ERRORS[status] + ".")
names_mask = XkbSymbolsNameMask | XkbGroupNamesMask
status = XkbGetNames(self._display, names_mask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_NAMES_ERRORS[status] + ".") | python | {
"resource": ""
} |
q38774 | XKeyboard.group_num | train | def group_num(self):
"""Current group number.
:getter: Returns current group number
:setter: Sets current group number
:type: int
"""
xkb_state = XkbStateRec()
XkbGetState(self._display, XkbUseCoreKbd, byref(xkb_state))
return xkb_state.group | python | {
"resource": ""
} |
q38775 | XKeyboard.group_symbol | train | def group_symbol(self):
"""Current group symbol.
:getter: Returns current group symbol
:setter: Sets current group symbol
:type: str
"""
s_mapping = {symdata.index: symdata.symbol for symdata in self._symboldata_list}
return s_mapping[self.group_num] | python | {
"resource": ""
} |
q38776 | _construct_register | train | def _construct_register(reg, default_reg):
"""Constructs a register dict."""
if reg:
x = dict((k, reg.get(k, d)) for k, d in default_reg.items())
else:
x = dict(default_reg)
return x | python | {
"resource": ""
} |
q38777 | PassingControl.pass_control_back | train | def pass_control_back(self, primary, secondary):
"""The address to which the controll is to be passed back.
Tells a potential controller device the address to which the control is
to be passed back.
:param primary: An integer in the range 0 to 30 representing the
primary address of the controller sending the command.
:param secondary: An integer in the range of 0 to 30 representing the
secondary address of the controller sending the command. If it is
missing, it indicates that the controller sending this command does
not have extended addressing.
"""
if secondary is None:
self._write(('*PCB', Integer(min=0, max=30)), primary)
else:
self._write(
('*PCB', [Integer(min=0, max=30), Integer(min=0, max=30)]),
primary,
secondary
) | python | {
"resource": ""
} |
q38778 | find_available_local_port | train | def find_available_local_port():
"""
Find a free port on localhost.
>>> 0 < find_available_local_port() < 65536
True
"""
infos = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
family, proto, _, _, addr = next(iter(infos))
sock = socket.socket(family, proto)
sock.bind(addr)
addr, port = sock.getsockname()[:2]
sock.close()
return port | python | {
"resource": ""
} |
q38779 | Checker.assert_free | train | def assert_free(self, host, port=None):
"""
Assert that the given addr is free
in that all attempts to connect fail within the timeout
or raise a PortNotFree exception.
>>> free_port = find_available_local_port()
>>> Checker().assert_free('localhost', free_port)
>>> Checker().assert_free('127.0.0.1', free_port)
>>> Checker().assert_free('::1', free_port)
Also accepts an addr tuple
>>> addr = '::1', free_port, 0, 0
>>> Checker().assert_free(addr)
Host might refer to a server bind address like '::', which
should use localhost to perform the check.
>>> Checker().assert_free('::', free_port)
"""
if port is None and isinstance(host, abc.Sequence):
host, port = host[:2]
if platform.system() == 'Windows':
host = client_host(host)
info = socket.getaddrinfo(
host, port, socket.AF_UNSPEC, socket.SOCK_STREAM,
)
list(itertools.starmap(self._connect, info)) | python | {
"resource": ""
} |
q38780 | _FailedAnswer.redeliver | train | def redeliver(self):
"""
Re-deliver the answer to the consequence which previously handled it
by raising an exception.
This method is intended to be invoked after the code in question has
been upgraded. Since there are no buggy answer receivers in
production, nothing calls it yet.
"""
self.consequence.answerReceived(self.answerValue,
self.messageValue,
self.sender,
self.target)
self.deleteFromStore() | python | {
"resource": ""
} |
q38781 | MessageQueue.routeAnswer | train | def routeAnswer(self, originalSender, originalTarget, value, messageID):
"""
Route an incoming answer to a message originally sent by this queue.
"""
def txn():
qm = self._messageFromSender(originalSender, messageID)
if qm is None:
return
c = qm.consequence
if c is not None:
c.answerReceived(value, qm.value,
qm.sender, qm.target)
elif value.type == DELIVERY_ERROR:
try:
raise MessageTransportError(value.data)
except MessageTransportError:
log.err(Failure(),
"An unhandled delivery error occurred on a message"
" with no consequence.")
qm.deleteFromStore()
try:
self.store.transact(txn)
except:
log.err(Failure(),
"An unhandled error occurred while handling a response to "
"an inter-store message.")
def answerProcessingFailure():
qm = self._messageFromSender(originalSender, messageID)
_FailedAnswer.create(store=qm.store,
consequence=qm.consequence,
sender=originalSender,
target=originalTarget,
messageValue=qm.value,
answerValue=value)
qm.deleteFromStore()
self.store.transact(answerProcessingFailure)
return defer.succeed(None) | python | {
"resource": ""
} |
q38782 | MessageQueue._messageFromSender | train | def _messageFromSender(self, sender, messageID):
"""
Locate a previously queued message by a given sender and messageID.
"""
return self.store.findUnique(
_QueuedMessage,
AND(_QueuedMessage.senderUsername == sender.localpart,
_QueuedMessage.senderDomain == sender.domain,
_QueuedMessage.messageID == messageID),
default=None) | python | {
"resource": ""
} |
q38783 | MessageQueue._verifySender | train | def _verifySender(self, sender):
"""
Verify that this sender is valid.
"""
if self.store.findFirst(
LoginMethod,
AND(LoginMethod.localpart == sender.localpart,
LoginMethod.domain == sender.domain,
LoginMethod.internal == True)) is None:
raise BadSender(sender.localpart + u'@' + sender.domain,
[lm.localpart + u'@' + lm.domain
for lm in self.store.query(
LoginMethod, LoginMethod.internal == True)]) | python | {
"resource": ""
} |
q38784 | MessageQueue.queueMessage | train | def queueMessage(self, sender, target, value,
consequence=None):
"""
Queue a persistent outgoing message.
@param sender: The a description of the shared item that is the sender
of the message.
@type sender: L{xmantissa.sharing.Identifier}
@param target: The a description of the shared item that is the target
of the message.
@type target: L{xmantissa.sharing.Identifier}
@param consequence: an item stored in the same database as this
L{MessageQueue} implementing L{IDeliveryConsequence}.
"""
self.messageCounter += 1
_QueuedMessage.create(store=self.store,
sender=sender,
target=target,
value=value,
messageID=self.messageCounter,
consequence=consequence)
self._scheduleMePlease() | python | {
"resource": ""
} |
q38785 | _AMPExposer.expose | train | def expose(self, commandObject):
"""
Declare a method as being related to the given command object.
@param commandObject: a L{Command} subclass.
"""
thunk = super(_AMPExposer, self).expose(commandObject.commandName)
def thunkplus(function):
result = thunk(function)
result.command = commandObject
return result
return thunkplus | python | {
"resource": ""
} |
q38786 | _AMPExposer.responderForName | train | def responderForName(self, instance, commandName):
"""
When resolving a command to a method from the wire, the information
available is the command's name; look up a command.
@param instance: an instance of a class who has methods exposed via
this exposer's L{_AMPExposer.expose} method.
@param commandName: the C{commandName} attribute of a L{Command}
exposed on the given instance.
@return: a bound method with a C{command} attribute.
"""
method = super(_AMPExposer, self).get(instance, commandName)
return method | python | {
"resource": ""
} |
q38787 | _AMPErrorExposer.expose | train | def expose(self, commandObject, exceptionType):
"""
Expose a function for processing a given AMP error.
"""
thunk = super(_AMPErrorExposer, self).expose(
(commandObject.commandName,
commandObject.errors.get(exceptionType)))
def thunkplus(function):
result = thunk(function)
result.command = commandObject
result.exception = exceptionType
return result
return thunkplus | python | {
"resource": ""
} |
q38788 | AMPReceiver._boxFromData | train | def _boxFromData(self, messageData):
"""
A box.
@param messageData: a serialized AMP box representing either a message
or an error.
@type messageData: L{str}
@raise MalformedMessage: if the C{messageData} parameter does not parse
to exactly one AMP box.
"""
inputBoxes = parseString(messageData)
if not len(inputBoxes) == 1:
raise MalformedMessage()
[inputBox] = inputBoxes
return inputBox | python | {
"resource": ""
} |
q38789 | Consultant._get | train | def _get(self, resource, payload=None):
''' Wrapper around requests.get that shorten caller url and takes care
of errors '''
# Avoid dangerous default function argument `{}`
payload = payload or {}
# Build the request and return json response
return requests.get(
'{}/{}/{}'.format(
self.master, pyconsul.__consul_api_version__, resource),
params=payload
) | python | {
"resource": ""
} |
q38790 | Consultant._put | train | def _put(self, resource, payload=None):
''' Wrapper around requests.put that shorten caller url and takes care
of errors '''
# Avoid dangerous default function argument `{}`
payload = payload or {}
# Build the request and return json response
return requests.put(
'{}/{}/{}'.format(
self.master, pyconsul.__consul_api_version__, resource),
params=payload
) | python | {
"resource": ""
} |
q38791 | Database.table_names | train | def table_names(self):
"""Returns names of all tables in the database"""
query = "SELECT name FROM sqlite_master WHERE type='table'"
cursor = self.connection.execute(query)
results = cursor.fetchall()
return [result_tuple[0] for result_tuple in results] | python | {
"resource": ""
} |
q38792 | Database.drop_all_tables | train | def drop_all_tables(self):
"""Drop all tables in the database"""
for table_name in self.table_names():
self.execute_sql("DROP TABLE %s" % table_name)
self.connection.commit() | python | {
"resource": ""
} |
q38793 | Database.execute_sql | train | def execute_sql(self, sql, commit=False):
"""Log and then execute a SQL query"""
logger.info("Running sqlite query: \"%s\"", sql)
self.connection.execute(sql)
if commit:
self.connection.commit() | python | {
"resource": ""
} |
q38794 | Database.version | train | def version(self):
"""What's the version of this database? Found in metadata attached
by datacache when creating this database."""
query = "SELECT version FROM %s" % METADATA_TABLE_NAME
cursor = self.connection.execute(query)
version = cursor.fetchone()
if not version:
return 0
else:
return int(version[0]) | python | {
"resource": ""
} |
q38795 | Database._finalize_database | train | def _finalize_database(self, version):
"""
Create metadata table for database with version number.
Parameters
----------
version : int
Tag created database with user-specified version number
"""
require_integer(version, "version")
create_metadata_sql = \
"CREATE TABLE %s (version INT)" % METADATA_TABLE_NAME
self.execute_sql(create_metadata_sql)
insert_version_sql = \
"INSERT INTO %s VALUES (%s)" % (METADATA_TABLE_NAME, version)
self.execute_sql(insert_version_sql) | python | {
"resource": ""
} |
q38796 | Database._create_table | train | def _create_table(self, table_name, column_types, primary=None, nullable=()):
"""Creates a sqlite3 table from the given metadata.
Parameters
----------
column_types : list of (str, str) pairs
First element of each tuple is the column name, second element is the sqlite3 type
primary : str, optional
Which column is the primary key
nullable : iterable, optional
Names of columns which have null values
"""
require_string(table_name, "table name")
require_iterable_of(column_types, tuple, name="rows")
if primary is not None:
require_string(primary, "primary")
require_iterable_of(nullable, str, name="nullable")
column_decls = []
for column_name, column_type in column_types:
decl = "%s %s" % (column_name, column_type)
if column_name == primary:
decl += " UNIQUE PRIMARY KEY"
if column_name not in nullable:
decl += " NOT NULL"
column_decls.append(decl)
column_decl_str = ", ".join(column_decls)
create_table_sql = \
"CREATE TABLE %s (%s)" % (table_name, column_decl_str)
self.execute_sql(create_table_sql) | python | {
"resource": ""
} |
q38797 | Database.create | train | def create(self, tables, version):
"""Do the actual work of creating the database, filling its tables with
values, creating indices, and setting the datacache version metadata.
Parameters
----------
tables : list
List of datacache.DatabaseTable objects
version : int
"""
for table in tables:
self._create_table(
table_name=table.name,
column_types=table.column_types,
primary=table.primary_key,
nullable=table.nullable)
self._fill_table(table.name, table.rows)
self._create_indices(table.name, table.indices)
self._finalize_database(version)
self._commit() | python | {
"resource": ""
} |
q38798 | Database._create_index | train | def _create_index(self, table_name, index_columns):
"""
Creates an index over multiple columns of a given table.
Parameters
----------
table_name : str
index_columns : iterable of str
Which columns should be indexed
"""
logger.info(
"Creating index on %s (%s)",
table_name,
", ".join(index_columns))
index_name = "%s_index_%s" % (
table_name,
"_".join(index_columns))
self.connection.execute(
"CREATE INDEX IF NOT EXISTS %s ON %s (%s)" % (
index_name,
table_name,
", ".join(index_columns))) | python | {
"resource": ""
} |
q38799 | MantissaViewHelper.locateChild | train | def locateChild(self, ctx, segments):
"""
Attempt to locate the child via the '.fragment' attribute, then fall
back to normal locateChild behavior.
"""
if self.fragment is not None:
# There are still a bunch of bogus subclasses of this class, which
# are used in a variety of distasteful ways. 'fragment' *should*
# always be set to something that isn't None, but there's no way to
# make sure that it will be for the moment. Every effort should be
# made to reduce public use of subclasses of this class (instead
# preferring to wrap content objects with
# IWebViewer.wrapModel()), so that the above check can be
# removed. -glyph
lc = getattr(self.fragment, 'locateChild', None)
if lc is not None:
x = lc(ctx, segments)
if x is not NotFound:
return x
return super(MantissaViewHelper, self).locateChild(ctx, segments) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.