| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q12600
|
Client._select_next_server
|
train
|
def _select_next_server(self):
"""
Looks up in the server pool for an available server
and attempts to connect.
"""
# Continue trying to connect until there is an available server
# or bail in case there are no more available servers.
while True:
if len(self._server_pool) == 0:
self._current_server = None
raise ErrNoServers
now = time.time()
s = self._server_pool.pop(0)
if self.options["max_reconnect_attempts"] > 0:
if s.reconnects > self.options["max_reconnect_attempts"]:
# Discard server since already tried to reconnect too many times.
continue
# Not yet exceeded max_reconnect_attempts so can still use
# this server in the future.
self._server_pool.append(s)
if s.last_attempt is not None and now < s.last_attempt + self.options["reconnect_time_wait"]:
# Backoff connecting to server if we attempted recently.
yield tornado.gen.sleep(self.options["reconnect_time_wait"])
try:
yield self._server_connect(s)
self._current_server = s
break
except Exception as e:
s.last_attempt = time.time()
s.reconnects += 1
self._err = e
if self._error_cb is not None:
self._error_cb(e)
self._status = Client.RECONNECTING
continue
|
python
|
{
"resource": ""
}
|
q12601
|
Client._close
|
train
|
def _close(self, status, do_callbacks=True):
"""
    Takes the status in which the connection should be left, and an
    optional boolean parameter that controls whether the disconnected
    and closed callbacks are dispatched, if any are set.
"""
if self.is_closed:
self._status = status
return
self._status = Client.CLOSED
# Stop background tasks
yield self._end_flusher_loop()
if self._ping_timer is not None and self._ping_timer.is_running():
self._ping_timer.stop()
if not self.io.closed():
self.io.close()
# Cleanup subscriptions since not reconnecting so no need
# to replay the subscriptions anymore.
    # Iterate over a snapshot since entries are popped during the loop.
    for ssid, sub in list(self._subs.items()):
self._subs.pop(ssid, None)
self._remove_subscription(sub)
if do_callbacks:
if self._disconnected_cb is not None:
self._disconnected_cb()
if self._closed_cb is not None:
self._closed_cb()
|
python
|
{
"resource": ""
}
|
q12602
|
Client.drain
|
train
|
def drain(self, sid=None):
"""
Drain will put a connection into a drain state. All subscriptions will
immediately be put into a drain state. Upon completion, the publishers
    will be drained and cannot publish any additional messages. Upon draining
    of the publishers, the connection will be closed. Use the `closed_cb`
option to know when the connection has moved from draining to closed.
If a sid is passed, just the subscription with that sid will be drained
without closing the connection.
"""
if self.is_draining:
return
if self.is_closed:
raise ErrConnectionClosed
if self.is_connecting or self.is_reconnecting:
raise ErrConnectionReconnecting
# Drain a single subscription
if sid is not None:
raise tornado.gen.Return(self._drain_sub(sid))
# Start draining the subscriptions
self._status = Client.DRAINING_SUBS
drain_tasks = []
for ssid, sub in self._subs.items():
task = self._drain_sub(ssid)
drain_tasks.append(task)
# Wait for subscriptions to stop handling messages.
drain_is_done = tornado.gen.multi(drain_tasks)
try:
yield tornado.gen.with_timeout(
timedelta(seconds=self.options["drain_timeout"]),
drain_is_done,
)
except tornado.gen.TimeoutError:
if self._error_cb is not None:
yield self._error_cb(ErrDrainTimeout())
finally:
self._status = Client.DRAINING_PUBS
yield self.flush()
yield self.close()
|
python
|
{
"resource": ""
}
|
q12603
|
Client._process_err
|
train
|
def _process_err(self, err=None):
"""
Stores the last received error from the server and dispatches the error callback.
"""
self.stats['errors_received'] += 1
if err == "'Authorization Violation'":
self._err = ErrAuthorization
elif err == "'Slow Consumer'":
self._err = ErrSlowConsumer
elif err == "'Stale Connection'":
self._err = ErrStaleConnection
else:
self._err = Exception(err)
if self._error_cb is not None:
self._error_cb(err)
|
python
|
{
"resource": ""
}
|
q12604
|
Client._read_loop
|
train
|
def _read_loop(self, data=''):
"""
    Read loop that gathers bytes from the server into a buffer
    of at most MAX_CONTROL_LINE_SIZE; the received bytes are then streamed
    to the parsing callback for processing.
"""
while True:
if not self.is_connected or self.is_connecting or self.io.closed():
break
try:
yield self.io.read_bytes(
DEFAULT_READ_CHUNK_SIZE,
streaming_callback=self._ps.parse,
partial=True)
except tornado.iostream.StreamClosedError as e:
self._err = e
if self._error_cb is not None and not self.is_reconnecting and not self.is_closed:
self._error_cb(e)
break
|
python
|
{
"resource": ""
}
|
q12605
|
Client._flusher_loop
|
train
|
def _flusher_loop(self):
"""
Coroutine which continuously tries to consume pending commands
and then flushes them to the socket.
"""
while True:
pending = []
pending_size = 0
try:
# Block and wait for the flusher to be kicked
yield self._flush_queue.get()
# Check whether we should bail first
if not self.is_connected or self.is_connecting or self.io.closed():
break
# Flush only when we actually have something in buffer...
if self._pending_size > 0:
cmds = b''.join(self._pending)
# Reset pending queue and store tmp in case write fails
self._pending, pending = [], self._pending
self._pending_size, pending_size = 0, self._pending_size
yield self.io.write(cmds)
except tornado.iostream.StreamBufferFullError:
            # Accumulate as pending data size and flush when possible.
self._pending = pending + self._pending
self._pending_size += pending_size
except tornado.iostream.StreamClosedError as e:
self._pending = pending + self._pending
self._pending_size += pending_size
yield self._process_op_err(e)
|
python
|
{
"resource": ""
}
|
q12606
|
Client._end_flusher_loop
|
train
|
def _end_flusher_loop(self):
"""
Let flusher_loop coroutine quit - useful when disconnecting.
"""
if not self.is_connected or self.is_connecting or self.io.closed():
if self._flush_queue is not None and self._flush_queue.empty():
self._flush_pending(check_connected=False)
yield tornado.gen.moment
|
python
|
{
"resource": ""
}
|
q12607
|
yaml_load
|
train
|
def yaml_load(source, defaultdata=NO_DEFAULT):
"""merge YAML data from files found in source
Always returns a dict. The YAML files are expected to contain some kind of
key:value structures, possibly deeply nested. When merging, lists are
appended and dict keys are replaced. The YAML files are read with the
yaml.safe_load function.
source can be a file, a dir, a list/tuple of files or a string containing
a glob expression (with ?*[]).
For a directory, all *.yaml files will be read in alphabetical order.
defaultdata can be used to initialize the data.
"""
logger = logging.getLogger(__name__)
logger.debug("initialized with source=%s, defaultdata=%s", source, defaultdata)
if defaultdata is NO_DEFAULT:
data = None
else:
data = defaultdata
files = []
if type(source) is not str and len(source) == 1:
# when called from __main source is always a list, even if it contains only one item.
# turn into a string if it contains only one item to support our different call modes
source = source[0]
if type(source) is list or type(source) is tuple:
# got a list, assume to be files
files = source
elif os.path.isdir(source):
# got a dir, read all *.yaml files
files = sorted(glob.glob(os.path.join(source, "*.yaml")))
elif os.path.isfile(source):
# got a single file, turn it into list to use the same code
files = [source]
else:
# try to use the source as a glob
files = sorted(glob.glob(source))
if files:
logger.debug("Reading %s\n", ", ".join(files))
for yaml_file in files:
try:
with open(yaml_file) as f:
new_data = safe_load(f)
logger.debug("YAML LOAD: %s", new_data)
except MarkedYAMLError as e:
logger.error("YAML Error: %s", e)
raise YamlReaderError("YAML Error: %s" % str(e))
if new_data is not None:
data = data_merge(data, new_data)
else:
if defaultdata is NO_DEFAULT:
logger.error("No YAML data found in %s and no default data given", source)
raise YamlReaderError("No YAML data found in %s" % source)
return data
|
python
|
{
"resource": ""
}
|
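The merge semantics described in the docstring above (lists appended, dict keys replaced) are easiest to see on sample data. The sketch below uses a minimal, hypothetical merge helper for illustration only; it is not the library's `data_merge` implementation.

```python
# Minimal sketch of the merge semantics described in yaml_load's docstring:
# lists are appended, dict keys are replaced. The helper below is a
# hypothetical stand-in for data_merge, shown only for illustration.
def merge(a, b):
    if isinstance(a, dict) and isinstance(b, dict):
        merged = dict(a)
        for key, value in b.items():
            merged[key] = merge(merged[key], value) if key in merged else value
        return merged
    if isinstance(a, list) and isinstance(b, list):
        return a + b          # lists are appended
    return b                  # scalars (and mismatched types) are replaced

base = {"hosts": ["a.example"], "limits": {"cpu": 1, "mem": "1G"}}
extra = {"hosts": ["b.example"], "limits": {"cpu": 2}}
print(merge(base, extra))
# {'hosts': ['a.example', 'b.example'], 'limits': {'cpu': 2, 'mem': '1G'}}
```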
q12608
|
BusinessTime.iterdays
|
train
|
def iterdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2
"""
curr = datetime.datetime.combine(d1, datetime.time())
end = datetime.datetime.combine(d2, datetime.time())
if d1.date() == d2.date():
yield curr
return
while curr < end:
yield curr
curr = curr + datetime.timedelta(days=1)
|
python
|
{
"resource": ""
}
|
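A quick usage sketch of the iterator above; since it does not touch instance state, a standalone copy is shown here to illustrate the half-open `d1 <= x < d2` behavior.

```python
import datetime

# Standalone copy of iterdays (it does not use instance state), shown only
# to illustrate that the end date is excluded from the iteration.
def iterdays(d1, d2):
    curr = datetime.datetime.combine(d1, datetime.time())
    end = datetime.datetime.combine(d2, datetime.time())
    if d1.date() == d2.date():
        yield curr
        return
    while curr < end:
        yield curr
        curr = curr + datetime.timedelta(days=1)

start = datetime.datetime(2023, 3, 10, 15, 30)
stop = datetime.datetime(2023, 3, 13, 9, 0)
print([d.date().isoformat() for d in iterdays(start, stop)])
# ['2023-03-10', '2023-03-11', '2023-03-12']  -- the end date is excluded
```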
q12609
|
BusinessTime.iterweekdays
|
train
|
def iterweekdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2, excluding weekends
"""
for dt in self.iterdays(d1, d2):
if not self.isweekend(dt):
yield dt
|
python
|
{
"resource": ""
}
|
q12610
|
BusinessTime.iterbusinessdays
|
train
|
def iterbusinessdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2, excluding weekends and holidays
"""
assert d2 >= d1
if d1.date() == d2.date() and d2.time() < self.business_hours[0]:
return
first = True
for dt in self.iterdays(d1, d2):
if first and d1.time() > self.business_hours[1]:
first = False
continue
first = False
if not self.isweekend(dt) and not self.isholiday(dt):
yield dt
|
python
|
{
"resource": ""
}
|
q12611
|
BusinessTime.businesstimedelta
|
train
|
def businesstimedelta(self, d1, d2):
"""
Returns a datetime.timedelta with the number of full business days
and business time between d1 and d2
"""
if d1 > d2:
d1, d2, timedelta_direction = d2, d1, -1
else:
timedelta_direction = 1
businessdays = self._build_spanning_datetimes(d1, d2)
time = datetime.timedelta()
if len(businessdays) == 0:
# HACK: manually handle the case when d1 is after business hours while d2 is during
if self.isduringbusinesshours(d2):
time += d2 - datetime.datetime.combine(d2,
self.business_hours[0])
# HACK: manually handle the case where d1 is on an earlier non-business day and d2 is after hours on a business day
elif not self.isbusinessday(d1) and self.isbusinessday(d2):
if d2.time() > self.business_hours[1]:
time += datetime.datetime.combine(
d2,
self.business_hours[1]) - datetime.datetime.combine(
d2, self.business_hours[0])
elif d2.time() > self.business_hours[0]:
time += d2 - datetime.datetime.combine(
d2, self.business_hours[0])
else:
prev = None
current = None
count = 0
for d in businessdays:
if current is None:
current = d
current = datetime.datetime.combine(d, current.time())
if prev is not None:
if prev.date() != current.date():
time += datetime.timedelta(days=1)
if count == len(businessdays) - 1:
if current > d:
# We went too far
time -= datetime.timedelta(days=1)
time += self.open_hours - (current - d)
else:
time += d - current
count += 1
prev = current
return time * timedelta_direction
|
python
|
{
"resource": ""
}
|
q12612
|
BusinessTime.businesstime_hours
|
train
|
def businesstime_hours(self, d1, d2):
"""
Returns a datetime.timedelta of business hours between d1 and d2,
based on the length of the businessday
"""
open_hours = self.open_hours.seconds / 3600
btd = self.businesstimedelta(d1, d2)
btd_hours = btd.seconds / 3600
return datetime.timedelta(hours=(btd.days * open_hours + btd_hours))
|
python
|
{
"resource": ""
}
|
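A worked example of the conversion above, assuming a hypothetical 8-hour business day: a business timedelta of 2 days and 3 hours becomes 2 * 8 + 3 = 19 business hours.

```python
import datetime

# Worked example of businesstime_hours' formula, assuming a hypothetical
# 8-hour business day. The btd value stands in for the result of
# businesstimedelta(d1, d2).
open_hours = datetime.timedelta(hours=8)
btd = datetime.timedelta(days=2, hours=3)

hours = btd.days * (open_hours.seconds / 3600) + btd.seconds / 3600
print(datetime.timedelta(hours=hours))  # 19:00:00
```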
q12613
|
USFederalHolidays._day_rule_matches
|
train
|
def _day_rule_matches(self, rule, dt):
"""
Day-of-month-specific US federal holidays that fall on Sat or Sun are
observed on Fri or Mon respectively. Note that this method considers
both the actual holiday and the day of observance to be holidays.
"""
if dt.weekday() == 4:
sat = dt + datetime.timedelta(days=1)
if super(USFederalHolidays, self)._day_rule_matches(rule, sat):
return True
elif dt.weekday() == 0:
sun = dt - datetime.timedelta(days=1)
if super(USFederalHolidays, self)._day_rule_matches(rule, sun):
return True
return super(USFederalHolidays, self)._day_rule_matches(rule, dt)
|
python
|
{
"resource": ""
}
|
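The observance rule above shifts a Friday forward one day and a Monday back one day before re-checking the day rule. A small sketch of that shift, using July 4, 2020 (a Saturday) as a hypothetical example:

```python
import datetime

# July 4, 2020 fell on a Saturday, so under the rule above the preceding
# Friday (July 3) is also treated as a holiday: a Friday checks the
# following Saturday against the day-of-month rule.
holiday = datetime.date(2020, 7, 4)   # Saturday
friday = datetime.date(2020, 7, 3)    # weekday() == 4

assert friday.weekday() == 4
shifted = friday + datetime.timedelta(days=1)
print(shifted == holiday)  # True -> Friday observes the Saturday holiday
```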
q12614
|
PickleShareDB.hcompress
|
train
|
def hcompress(self, hashroot):
""" Compress category 'hashroot', so hset is fast again
hget will fail if fast_only is True for compressed items (that were
hset before hcompress).
"""
hfiles = self.keys(hashroot + "/*")
all = {}
for f in hfiles:
# print "using",f
all.update(self[f])
self.uncache(f)
self[hashroot + '/xx'] = all
for f in hfiles:
p = self.root / f
if p.name == 'xx':
continue
p.unlink()
|
python
|
{
"resource": ""
}
|
q12615
|
PickleShareDB.keys
|
train
|
def keys(self, globpat = None):
""" All keys in DB, or all keys matching a glob"""
if globpat is None:
files = self.root.rglob('*')
else:
files = self.root.glob(globpat)
return [self._normalized(p) for p in files if p.is_file()]
|
python
|
{
"resource": ""
}
|
q12616
|
PickleShareDB.uncache
|
train
|
def uncache(self,*items):
""" Removes all, or specified items from cache
Use this after reading a large amount of large objects
to free up memory, when you won't be needing the objects
for a while.
"""
if not items:
self.cache = {}
for it in items:
self.cache.pop(it,None)
|
python
|
{
"resource": ""
}
|
q12617
|
hacking_has_only_comments
|
train
|
def hacking_has_only_comments(physical_line, filename, lines, line_number):
"""Check for empty files with only comments
H104 empty file with only comments
"""
if line_number == 1 and all(map(EMPTY_LINE_RE.match, lines)):
return (0, "H104: File contains nothing but comments")
|
python
|
{
"resource": ""
}
|
q12618
|
_project_is_apache
|
train
|
def _project_is_apache():
"""Determine if a project is Apache.
Look for a key string in a set of possible license files to figure out
if a project looks to be Apache. This is used as a precondition for
enforcing license headers.
"""
global _is_apache_cache
if _is_apache_cache is not None:
return _is_apache_cache
license_files = ["LICENSE"]
for filename in license_files:
try:
with open(filename, "r") as file:
for line in file:
if re.search('Apache License', line):
_is_apache_cache = True
return True
except IOError:
pass
_is_apache_cache = False
return False
|
python
|
{
"resource": ""
}
|
q12619
|
_check_for_exact_apache
|
train
|
def _check_for_exact_apache(start, lines):
"""Check for the Apache 2.0 license header.
We strip all the newlines and extra spaces so this license string
should work regardless of indentation in the file.
"""
APACHE2 = """
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License."""
# out of all the formatting I've seen, a 12 line version seems to be the
# longest in the source tree. So just take the 12 lines starting with where
# the Apache starting words were found, strip all the '#' and collapse the
# spaces.
content = ''.join(lines[start:(start + 12)])
    content = re.sub(r'#', '', content)
    content = re.sub(r'\s+', ' ', content).strip()
    stripped_apache2 = re.sub(r'\s+', ' ', APACHE2).strip()
if stripped_apache2 in content:
return True
else:
print("<license>!=<apache2>:\n'%s' !=\n'%s'" %
(content, stripped_apache2))
return False
|
python
|
{
"resource": ""
}
|
q12620
|
hacking_no_author_tags
|
train
|
def hacking_no_author_tags(physical_line):
"""Check that no author tags are used.
H105 don't use author tags
"""
for regex in AUTHOR_TAG_RE:
if regex.match(physical_line):
physical_line = physical_line.lower()
pos = physical_line.find('moduleauthor')
if pos < 0:
pos = physical_line.find('author')
return (pos, "H105: Don't use author tags")
|
python
|
{
"resource": ""
}
|
q12621
|
hacking_python3x_except_compatible
|
train
|
def hacking_python3x_except_compatible(logical_line, noqa):
r"""Check for except statements to be Python 3.x compatible
As of Python 3.x, the construct 'except x,y:' has been removed.
Use 'except x as y:' instead.
Okay: try:\n pass\nexcept Exception:\n pass
Okay: try:\n pass\nexcept (Exception, AttributeError):\n pass
H231: try:\n pass\nexcept AttributeError, e:\n pass
Okay: try:\n pass\nexcept AttributeError, e: # noqa\n pass
"""
if noqa:
return
def is_old_style_except(logical_line):
return (',' in logical_line and
')' not in logical_line.rpartition(',')[2])
if (logical_line.startswith("except ") and
logical_line.endswith(':') and
is_old_style_except(logical_line)):
yield 0, "H231: Python 3.x incompatible 'except x,y:' construct"
|
python
|
{
"resource": ""
}
|
q12622
|
hacking_python3x_octal_literals
|
train
|
def hacking_python3x_octal_literals(logical_line, tokens, noqa):
r"""Check for octal literals in Python 3.x compatible form.
As of Python 3.x, the construct "0755" has been removed.
Use "0o755" instead".
Okay: f(0o755)
Okay: 'f(0755)'
Okay: f(755)
Okay: f(0)
Okay: f(000)
Okay: MiB = 1.0415
H232: f(0755)
Okay: f(0755) # noqa
"""
if noqa:
return
for token_type, text, _, _, _ in tokens:
if token_type == tokenize.NUMBER:
match = RE_OCTAL.match(text)
if match:
yield 0, ("H232: Python 3.x incompatible octal %s should be "
"written as 0o%s " %
(match.group(0)[1:], match.group(1)))
|
python
|
{
"resource": ""
}
|
q12623
|
hacking_python3x_print_function
|
train
|
def hacking_python3x_print_function(logical_line, noqa):
r"""Check that all print occurrences look like print functions.
Check that all occurrences of print look like functions, not
print operator. As of Python 3.x, the print operator has
been removed.
Okay: print(msg)
Okay: print (msg)
Okay: print msg # noqa
Okay: print()
H233: print msg
H233: print >>sys.stderr, "hello"
H233: print msg,
H233: print
"""
if noqa:
return
for match in RE_PRINT.finditer(logical_line):
yield match.start(0), (
"H233: Python 3.x incompatible use of print operator")
|
python
|
{
"resource": ""
}
|
q12624
|
hacking_python3x_metaclass
|
train
|
def hacking_python3x_metaclass(logical_line, noqa):
r"""Check for metaclass to be Python 3.x compatible.
Okay: @six.add_metaclass(Meta)\nclass Foo(object):\n pass
Okay: @six.with_metaclass(Meta)\nclass Foo(object):\n pass
Okay: class Foo(object):\n '''docstring\n\n __metaclass__ = Meta\n'''
H236: class Foo(object):\n __metaclass__ = Meta
H236: class Foo(object):\n foo=bar\n __metaclass__ = Meta
H236: class Foo(object):\n '''docstr.'''\n __metaclass__ = Meta
H236: class Foo(object):\n __metaclass__ = \\\n Meta
Okay: class Foo(object):\n __metaclass__ = Meta # noqa
"""
if noqa:
return
split_line = logical_line.split()
if(len(split_line) > 2 and split_line[0] == '__metaclass__' and
split_line[1] == '='):
yield (logical_line.find('__metaclass__'),
"H236: Python 3.x incompatible __metaclass__, "
"use six.add_metaclass()")
|
python
|
{
"resource": ""
}
|
q12625
|
hacking_no_removed_module
|
train
|
def hacking_no_removed_module(logical_line, noqa):
r"""Check for removed modules in Python 3.
Examples:
Okay: from os import path
Okay: from os import path as p
Okay: from os import (path as p)
Okay: import os.path
H237: import thread
Okay: import thread # noqa
H237: import commands
H237: import md5 as std_md5
"""
if noqa:
return
line = core.import_normalize(logical_line.strip())
if line and line.split()[0] == 'import':
module_name = line.split()[1].split('.')[0]
if module_name in removed_modules:
yield 0, ("H237: module %s is "
"removed in Python 3" % module_name)
|
python
|
{
"resource": ""
}
|
q12626
|
hacking_no_old_style_class
|
train
|
def hacking_no_old_style_class(logical_line, noqa):
r"""Check for old style classes.
Examples:
Okay: class Foo(object):\n pass
Okay: class Foo(Bar, Baz):\n pass
Okay: class Foo(object, Baz):\n pass
Okay: class Foo(somefunc()):\n pass
H238: class Bar:\n pass
H238: class Bar():\n pass
"""
if noqa:
return
line = core.import_normalize(logical_line.strip())
if line.startswith("class ") and not RE_NEW_STYLE_CLASS.match(line):
yield (0, "H238: old style class declaration, "
"use new style (inherit from `object`)")
|
python
|
{
"resource": ""
}
|
q12627
|
no_vim_headers
|
train
|
def no_vim_headers(physical_line, line_number, lines):
r"""Check for vim editor configuration in source files.
By default vim modelines can only appear in the first or
last 5 lines of a source file.
Examples:
H106: # vim: set tabstop=4 shiftwidth=4\n#\n#\n#\n#\n#
H106: # Lic\n# vim: set tabstop=4 shiftwidth=4\n#\n#\n#\n#\n#
H106: # Lic\n#\n#\n#\n#\n#\n#\n#\n#\n# vim: set tabstop=4 shiftwidth=4
Okay: # Lic\n#\n#\n#\n#\n#\n#\n#
Okay: # viminal hill is located in Rome
Okay: # vim, ze nemluvis cesky
"""
if ((line_number <= 5 or line_number > len(lines) - 5) and
vim_header_re.match(physical_line)):
return 0, "H106: Don't put vim configuration in source files"
|
python
|
{
"resource": ""
}
|
q12628
|
hacking_import_rules
|
train
|
def hacking_import_rules(logical_line, filename, noqa):
r"""Check for imports.
OpenStack HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
Okay: from nova.compute import api
H301: from nova.compute import api, utils
Do not use wildcard import
Do not make relative imports
Examples:
Okay: from os import path
Okay: from os import path as p
Okay: from os import (path as p)
Okay: import os.path
Okay: from nova.compute import rpcapi
Okay: from six.moves.urllib import parse
H303: from os.path import *
H304: from .compute import rpcapi
"""
# TODO(jogo): make the following doctests pass:
# H301: import os, sys
# TODO(mordred: We need to split this into different checks so that they
# can be disabled by command line switches properly
if noqa:
return
split_line = logical_line.split()
split_line_len = len(split_line)
if (split_line_len > 1 and split_line[0] in ('import', 'from') and
not core.is_import_exception(split_line[1])):
pos = logical_line.find(',')
if pos != -1:
if split_line[0] == 'from':
yield pos, "H301: one import per line"
pos = logical_line.find('*')
if pos != -1:
yield pos, "H303: No wildcard (*) import."
return
if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
if 'from' == split_line[0] and split_line_len > 3:
mod = '.'.join((split_line[1], split_line[3]))
if core.is_import_exception(mod):
return
if RE_RELATIVE_IMPORT.search(logical_line):
yield logical_line.find('.'), (
"H304: No relative imports. '%s' is a relative import"
% logical_line)
return
|
python
|
{
"resource": ""
}
|
q12629
|
hacking_import_alphabetical
|
train
|
def hacking_import_alphabetical(logical_line, blank_before, previous_logical,
indent_level, previous_indent_level):
r"""Check for imports in alphabetical order.
OpenStack HACKING guide recommendation for imports:
imports in human alphabetical order
Okay: import os\nimport sys\n\nimport nova\nfrom nova import test
Okay: import os\nimport sys
H306: import sys\nimport os
Okay: import sys\n\n# foo\nimport six
"""
# handle import x
# use .lower since capitalization shouldn't dictate order
if blank_before < 1 and indent_level == previous_indent_level:
split_line = core.import_normalize(logical_line.
strip()).lower().split()
split_previous = core.import_normalize(previous_logical.
strip()).lower().split()
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
yield (0, "H306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
|
python
|
{
"resource": ""
}
|
q12630
|
is_import_exception
|
train
|
def is_import_exception(mod):
"""Check module name to see if import has been whitelisted.
Import based rules should not run on any whitelisted module
"""
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
|
python
|
{
"resource": ""
}
|
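A standalone sketch of the whitelist check above; the `IMPORT_EXCEPTIONS` value here is hypothetical, as the real set is defined elsewhere in hacking.

```python
# Hypothetical IMPORT_EXCEPTIONS value for illustration only.
IMPORT_EXCEPTIONS = {'six.moves', '__future__'}

def is_import_exception(mod):
    # Exact match, or a submodule of a whitelisted package.
    return (mod in IMPORT_EXCEPTIONS or
            any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))

print(is_import_exception('six.moves'))           # True (exact match)
print(is_import_exception('six.moves.urllib'))    # True (prefix match)
print(is_import_exception('sixteen'))             # False (no accidental prefix hit)
```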
q12631
|
hacking_docstring_start_space
|
train
|
def hacking_docstring_start_space(physical_line, previous_logical, tokens):
r"""Check for docstring not starting with space.
OpenStack HACKING guide recommendation for docstring:
Docstring should not start with space
Okay: def foo():\n '''This is good.'''
Okay: def foo():\n r'''This is good.'''
Okay: def foo():\n a = ''' This is not a docstring.'''
Okay: def foo():\n pass\n ''' This is not.'''
H401: def foo():\n ''' This is not.'''
H401: def foo():\n r''' This is not.'''
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
if docstring[len(start_triple)] == ' ':
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H401: docstring should not start with"
" a space")
|
python
|
{
"resource": ""
}
|
q12632
|
hacking_docstring_multiline_end
|
train
|
def hacking_docstring_multiline_end(physical_line, previous_logical, tokens):
r"""Check multi line docstring end.
OpenStack HACKING guide recommendation for docstring:
Docstring should end on a new line
Okay: '''foobar\nfoo\nbar\n'''
Okay: def foo():\n '''foobar\n\nfoo\nbar\n'''
Okay: class Foo(object):\n '''foobar\n\nfoo\nbar\n'''
Okay: def foo():\n a = '''not\na\ndocstring'''
Okay: def foo():\n a = '''not\na\ndocstring''' # blah
Okay: def foo():\n pass\n'''foobar\nfoo\nbar\n d'''
H403: def foo():\n '''foobar\nfoo\nbar\ndocstring'''
H403: def foo():\n '''foobar\nfoo\nbar\npretend raw: r'''
H403: class Foo(object):\n '''foobar\nfoo\nbar\ndocstring'''\n\n
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
if '\n' not in docstring:
# not a multi line
return
else:
last_line = docstring.split('\n')[-1]
pos = max(last_line.rfind(i) for i in END_DOCSTRING_TRIPLE)
if len(last_line[:pos].strip()) > 0:
# Something before the end docstring triple
return (pos,
"H403: multi line docstrings should end on a new line")
|
python
|
{
"resource": ""
}
|
q12633
|
hacking_docstring_multiline_start
|
train
|
def hacking_docstring_multiline_start(physical_line, previous_logical, tokens):
r"""Check multi line docstring starts immediately with summary.
OpenStack HACKING guide recommendation for docstring:
Docstring should start with a one-line summary, less than 80 characters.
Okay: '''foobar\n\nfoo\nbar\n'''
Okay: def foo():\n a = '''\nnot\na docstring\n'''
H404: def foo():\n '''\nfoo\nbar\n'''\n\n
H404: def foo():\n r'''\nfoo\nbar\n'''\n\n
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
if '\n' not in docstring:
# single line docstring
return
start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
lines = docstring.split('\n')
if lines[0].strip() == start_triple:
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H404: multi line docstring "
"should start without a leading new line")
|
python
|
{
"resource": ""
}
|
q12634
|
hacking_docstring_summary
|
train
|
def hacking_docstring_summary(physical_line, previous_logical, tokens):
r"""Check multi line docstring summary is separated with empty line.
OpenStack HACKING guide recommendation for docstring:
Docstring should start with a one-line summary, less than 80 characters.
Okay: def foo():\n a = '''\nnot\na docstring\n'''
Okay: '''foobar\n\nfoo\nbar\n'''
H405: def foo():\n '''foobar\nfoo\nbar\n'''
H405: def foo():\n r'''foobar\nfoo\nbar\n'''
H405: def foo():\n '''foobar\n'''
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
if '\n' not in docstring:
# not a multi line docstring
return
lines = docstring.split('\n')
        if len(lines) > 1 and len(lines[1].strip()) != 0:
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H405: multi line docstring "
"summary not separated with an empty line")
|
python
|
{
"resource": ""
}
|
q12635
|
is_docstring
|
train
|
def is_docstring(tokens, previous_logical):
"""Return found docstring
'A docstring is a string literal that occurs as the first statement in a
module, function, class,'
http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
"""
for token_type, text, start, _, _ in tokens:
if token_type == tokenize.STRING:
break
elif token_type != tokenize.INDENT:
return False
else:
return False
line = text.lstrip()
start, start_triple = _find_first_of(line, START_DOCSTRING_TRIPLE)
if (previous_logical.startswith("def ") or
previous_logical.startswith("class ")):
if start == 0:
return text
|
python
|
{
"resource": ""
}
|
q12636
|
_find_first_of
|
train
|
def _find_first_of(line, substrings):
"""Find earliest occurrence of one of substrings in line.
Returns pair of index and found substring, or (-1, None)
if no occurrences of any of substrings were found in line.
"""
starts = ((line.find(i), i) for i in substrings)
found = [(i, sub) for i, sub in starts if i != -1]
if found:
return min(found)
else:
return -1, None
|
python
|
{
"resource": ""
}
|
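A quick demonstration of the earliest-occurrence behavior, using a standalone copy of the helper; the substrings here stand in for START_DOCSTRING_TRIPLE, which is defined elsewhere.

```python
# Standalone copy of _find_first_of with a small demonstration.
def _find_first_of(line, substrings):
    starts = ((line.find(i), i) for i in substrings)
    found = [(i, sub) for i, sub in starts if i != -1]
    if found:
        return min(found)
    return -1, None

# The earliest match wins, even when several substrings occur.
print(_find_first_of('r"""Check for imports.', ['"""', "'''", 'r"""']))
# (0, 'r"""')
print(_find_first_of('no triple quotes here', ['"""', "'''"]))
# (-1, None)
```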
q12637
|
check_i18n
|
train
|
def check_i18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
token_type, text, _, _, line = yield
except GeneratorExit:
return
if text == "def" and token_type == tokenize.NAME:
# explicitly ignore function definitions, as oslo defines these
return
if (token_type == tokenize.NAME and
text in ["_", "_LI", "_LW", "_LE", "_LC"]):
while True:
token_type, text, start, _, _ = yield
if token_type != tokenize.NL:
break
if token_type != tokenize.OP or text != "(":
continue # not a localization call
format_string = ''
while True:
token_type, text, start, _, _ = yield
if token_type == tokenize.STRING:
format_string += eval(text)
elif token_type == tokenize.NL:
pass
else:
break
if not format_string:
raise LocalizationError(
start, "H701: Empty localization string")
if token_type != tokenize.OP:
raise LocalizationError(
start, "H701: Invalid localization call")
if text != ")":
if text == "%":
raise LocalizationError(
start,
"H702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
raise LocalizationError(
start,
"H702: Use bare string concatenation instead of +")
else:
raise LocalizationError(
start, "H702: Argument to _, _LI, _LW, _LC, or _LE "
"must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(
start, "H703: Multiple positional placeholders")
|
python
|
{
"resource": ""
}
|
q12638
|
hacking_localization_strings
|
train
|
def hacking_localization_strings(logical_line, tokens, noqa):
r"""Check localization in line.
Okay: _("This is fine")
Okay: _LI("This is fine")
Okay: _LW("This is fine")
Okay: _LE("This is fine")
Okay: _LC("This is fine")
Okay: _("This is also fine %s")
Okay: _("So is this %s, %(foo)s") % {foo: 'foo'}
H701: _('')
Okay: def _(msg):\n pass
Okay: def _LE(msg):\n pass
H701: _LI('')
H701: _LW('')
H701: _LE('')
H701: _LC('')
Okay: _('') # noqa
H702: _("Bob" + " foo")
H702: _LI("Bob" + " foo")
H702: _LW("Bob" + " foo")
H702: _LE("Bob" + " foo")
H702: _LC("Bob" + " foo")
Okay: _("Bob" + " foo") # noqa
H702: _("Bob %s" % foo)
H702: _LI("Bob %s" % foo)
H702: _LW("Bob %s" % foo)
H702: _LE("Bob %s" % foo)
H702: _LC("Bob %s" % foo)
H702: _("%s %s" % (foo, bar))
H703: _("%s %s") % (foo, bar)
"""
if noqa:
return
gen = check_i18n()
next(gen)
try:
list(map(gen.send, tokens))
gen.close()
except LocalizationError as e:
yield e.args
|
python
|
{
"resource": ""
}
|
q12639
|
is_none
|
train
|
def is_none(node):
'''Check whether an AST node corresponds to None.
In Python 2 None uses the same ast.Name class that variables etc. use,
but in Python 3 there is a new ast.NameConstant class.
'''
if PY2:
return isinstance(node, ast.Name) and node.id == 'None'
return isinstance(node, ast.NameConstant) and node.value is None
|
python
|
{
"resource": ""
}
|
q12640
|
hacking_assert_equal
|
train
|
def hacking_assert_equal(logical_line, noqa):
r"""Check that self.assertEqual and self.assertNotEqual are used.
Okay: self.assertEqual(x, y)
Okay: self.assertNotEqual(x, y)
H204: self.assertTrue(x == y)
H204: self.assertTrue(x != y)
H204: self.assertFalse(x == y)
H204: self.assertFalse(x != y)
"""
if noqa:
return
methods = ['assertTrue', 'assertFalse']
for method in methods:
start = logical_line.find('.%s' % method) + 1
if start != 0:
break
else:
return
comparisons = [ast.Eq, ast.NotEq]
checker = AssertTrueFalseChecker(methods, comparisons)
checker.visit(ast.parse(logical_line))
if checker.error:
yield start, 'H204: Use assert(Not)Equal()'
|
python
|
{
"resource": ""
}
|
q12641
|
hacking_no_cr
|
train
|
def hacking_no_cr(physical_line):
r"""Check that we only use newlines not carriage returns.
Okay: import os\nimport sys
# pep8 doesn't yet replace \r in strings, will work on an
# upstream fix
H903 import os\r\nimport sys
"""
pos = physical_line.find('\r')
if pos != -1 and pos == (len(physical_line) - 2):
return (pos, "H903: Windows style line endings not allowed in code")
|
python
|
{
"resource": ""
}
|
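A standalone copy of the check above showing which inputs trigger H903: the carriage return is flagged only when the physical line ends in '\r\n'.

```python
# Standalone copy of hacking_no_cr with a quick demonstration.
def hacking_no_cr(physical_line):
    pos = physical_line.find('\r')
    if pos != -1 and pos == (len(physical_line) - 2):
        return (pos, "H903: Windows style line endings not allowed in code")

print(hacking_no_cr('import os\r\n'))
# (9, 'H903: Windows style line endings not allowed in code')
print(hacking_no_cr('import os\n'))
# None
```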
q12642
|
PaginatedDatasetResults.count
|
train
|
def count(self, count):
"""
Sets the count of this PaginatedDatasetResults.
:param count: The count of this PaginatedDatasetResults.
:type: int
"""
if count is None:
raise ValueError("Invalid value for `count`, must not be `None`")
if count is not None and count < 0:
raise ValueError("Invalid value for `count`, must be a value greater than or equal to `0`")
self._count = count
|
python
|
{
"resource": ""
}
|
q12643
|
WebAuthorization.type
|
train
|
def type(self, type):
"""
Sets the type of this WebAuthorization.
The authorization scheme. Usually this is \"Bearer\" but it could be other values like \"Token\" or \"Basic\" etc.
:param type: The type of this WebAuthorization.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
if type is not None and len(type) > 50:
raise ValueError("Invalid value for `type`, length must be less than or equal to `50`")
if type is not None and not re.search('[\\x21-\\x7E \\t]*', type):
raise ValueError("Invalid value for `type`, must be a follow pattern or equal to `/[\\x21-\\x7E \\t]*/`")
self._type = type
|
python
|
{
"resource": ""
}
|
q12644
|
LocalDataset.describe
|
train
|
def describe(self, resource=None):
"""Describe dataset or resource within dataset
:param resource: The name of a specific resource (i.e. file or table)
contained in the dataset. If ``resource`` is None, this method
will describe the dataset itself. (Default value = None)
:type resource: str, optional
:returns: The descriptor of the dataset or of a specific resource, if
``resource`` is specified in the call.
:rtype: dict
"""
if resource is None:
# Show simpler descriptor, omitting schema definitions
simple_descriptor = copy.deepcopy(self._datapackage.descriptor)
for resource in simple_descriptor['resources']:
resource.pop('schema', None)
return simple_descriptor
else:
return self.__resources[resource].descriptor
|
python
|
{
"resource": ""
}
|
q12645
|
LocalDataset._load_raw_data
|
train
|
def _load_raw_data(self, resource_name):
"""Extract raw data from resource
:param resource_name:
"""
# Instantiating the resource again as a simple `Resource` ensures that
# ``data`` will be returned as bytes.
upcast_resource = datapackage.Resource(
self.__resources[resource_name].descriptor,
default_base_path=self.__base_path)
return upcast_resource.data
|
python
|
{
"resource": ""
}
|
q12646
|
LocalDataset._load_table
|
train
|
def _load_table(self, resource_name):
"""Build table structure from resource data
:param resource_name:
"""
tabular_resource = self.__tabular_resources[resource_name]
try:
# Sorting fields in the same order as they appear in the schema
# is necessary for tables to be converted into pandas.DataFrame
fields = []
if 'schema' in tabular_resource.descriptor:
fields = [f['name'] for f in
tabular_resource.descriptor['schema']['fields']]
elif len(tabular_resource.data) > 0:
fields = tabular_resource.data[0].keys()
return [order_columns_in_row(fields, row) for row in
tabular_resource.data]
except (SchemaValidationError, ValueError, TypeError) as e:
warnings.warn(
'Unable to set column types automatically using {} schema. '
'Data types may need to be adjusted manually. '
'Error: {}'.format(resource_name, e))
self.__invalid_schemas.append(resource_name)
file_format = tabular_resource.descriptor['format']
with Stream(io.BytesIO(self.raw_data[resource_name]),
format=file_format, headers=1,
scheme='stream', encoding='utf-8') as stream:
return [OrderedDict(zip(stream.headers, row))
for row in stream.iter()]
|
python
|
{
"resource": ""
}
|
q12647
|
LocalDataset._load_dataframe
|
train
|
def _load_dataframe(self, resource_name):
"""Build pandas.DataFrame from resource data
    Lazy load any optional dependencies in order to allow users to
    use the package without installing pandas if they so wish.
:param resource_name:
"""
try:
import pandas
except ImportError:
raise RuntimeError('To enable dataframe support, '
'run \'pip install datadotworld[pandas]\'')
tabular_resource = self.__tabular_resources[resource_name]
field_dtypes = fields_to_dtypes(tabular_resource.descriptor['schema'])
try:
return pandas.read_csv(
path.join(
self.__base_path,
tabular_resource.descriptor['path']),
dtype=field_dtypes['other'],
parse_dates=list(field_dtypes['dates'].keys()),
infer_datetime_format=True)
except ValueError as e:
warnings.warn(
'Unable to set data frame dtypes automatically using {} '
'schema. Data types may need to be adjusted manually. '
'Error: {}'.format(resource_name, e))
return pandas.read_csv(
path.join(
self.__base_path,
tabular_resource.descriptor['path']))
|
python
|
{
"resource": ""
}
|
q12648
|
DatasetPatchRequest.visibility
|
train
|
def visibility(self, visibility):
"""
Sets the visibility of this DatasetPatchRequest.
Dataset visibility. `OPEN` if the dataset can be seen by any member of data.world. `PRIVATE` if the dataset can be seen by its owner and authorized collaborators.
:param visibility: The visibility of this DatasetPatchRequest.
:type: str
"""
allowed_values = ["OPEN", "PRIVATE"]
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}"
.format(visibility, allowed_values)
)
self._visibility = visibility
|
python
|
{
"resource": ""
}
|
q12649
|
ProjectPutRequest.objective
|
train
|
def objective(self, objective):
"""
Sets the objective of this ProjectPutRequest.
Short project objective.
:param objective: The objective of this ProjectPutRequest.
:type: str
"""
if objective is not None and len(objective) > 120:
raise ValueError("Invalid value for `objective`, length must be less than or equal to `120`")
if objective is not None and len(objective) < 0:
raise ValueError("Invalid value for `objective`, length must be greater than or equal to `0`")
self._objective = objective
|
python
|
{
"resource": ""
}
|
q12650
|
QueryResults.table
|
train
|
def table(self):
"""Build and cache a table from query results"""
if self._table is None:
self._table = list(self._iter_rows())
return self._table
|
python
|
{
"resource": ""
}
|
q12651
|
QueryResults.dataframe
|
train
|
def dataframe(self):
"""Build and cache a dataframe from query results"""
if self._dataframe is None:
try:
import pandas as pd
except ImportError:
raise RuntimeError('To enable dataframe support, '
'run \'pip install datadotworld[pandas]\'')
self._dataframe = pd.DataFrame.from_records(self._iter_rows(),
coerce_float=True)
return self._dataframe
|
python
|
{
"resource": ""
}
|
q12652
|
fields_to_dtypes
|
train
|
def fields_to_dtypes(schema):
"""Maps table schema fields types to dtypes separating date fields
:param schema:
"""
datetime_types = ['date', 'datetime']
datetime_fields = {
f['name']: _TABLE_SCHEMA_DTYPE_MAPPING.get(f['type'], 'object')
for f in schema['fields']
if f['type'] in datetime_types}
other_fields = {
f['name']: _TABLE_SCHEMA_DTYPE_MAPPING.get(f['type'], 'object')
for f in schema['fields']
if f['type'] not in datetime_types}
return {'dates': datetime_fields, 'other': other_fields}
|
python
|
{
"resource": ""
}
|
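A sketch of the date/other split performed above. The dtype mapping below is a hypothetical stand-in for `_TABLE_SCHEMA_DTYPE_MAPPING`, which is defined elsewhere in the module.

```python
# Hypothetical stand-in for _TABLE_SCHEMA_DTYPE_MAPPING, for illustration.
_TABLE_SCHEMA_DTYPE_MAPPING = {'integer': 'int64', 'number': 'float64'}

def fields_to_dtypes(schema):
    datetime_types = ['date', 'datetime']
    dates = {f['name']: _TABLE_SCHEMA_DTYPE_MAPPING.get(f['type'], 'object')
             for f in schema['fields'] if f['type'] in datetime_types}
    other = {f['name']: _TABLE_SCHEMA_DTYPE_MAPPING.get(f['type'], 'object')
             for f in schema['fields'] if f['type'] not in datetime_types}
    return {'dates': dates, 'other': other}

schema = {'fields': [{'name': 'id', 'type': 'integer'},
                     {'name': 'created', 'type': 'date'},
                     {'name': 'note', 'type': 'string'}]}
print(fields_to_dtypes(schema))
# {'dates': {'created': 'object'}, 'other': {'id': 'int64', 'note': 'object'}}
```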
q12653
|
sanitize_resource_schema
|
train
|
def sanitize_resource_schema(r):
"""Sanitize table schema for increased compatibility
Up to version 0.9.0 jsontableschema did not support
year, yearmonth and duration field types
https://github.com/frictionlessdata/jsontableschema-py/pull/152
:param r:
"""
if 'schema' in r.descriptor:
r.descriptor['schema'] = _sanitize_schema(r.descriptor['schema'])
return r
|
python
|
{
"resource": ""
}
|
q12654
|
infer_table_schema
|
train
|
def infer_table_schema(sparql_results_json):
"""Infer Table Schema from SPARQL results JSON
SPARQL JSON Results Spec:
https://www.w3.org/TR/2013/REC-sparql11-results-json-20130321
:param sparql_results_json: SPARQL JSON results of a query
:returns: A schema descriptor for the inferred schema
:rtype: dict (json)
"""
if ('results' in sparql_results_json and
'bindings' in sparql_results_json['results'] and
len(sparql_results_json['results']['bindings']) > 0):
# SQL results include metadata, SPARQL results don't
result_metadata = sparql_results_json.get('metadata', [])
metadata_names = [item['name'] for item in result_metadata]
result_vars = sparql_results_json['head']['vars']
_verify_unique_names(result_vars, metadata_names)
# SQL results require var name mapping, SPARQL results vars don't
result_vars_mapping = dict(zip(
result_vars, (metadata_names
if metadata_names != []
else result_vars)))
homogeneous_types = _get_types_from_sample(
result_vars, sparql_results_json)
fields = []
if homogeneous_types is None:
for result_var in result_vars:
fields.append({
'name': result_vars_mapping.get(result_var),
'type': 'string'
})
else:
for index, var in enumerate(result_vars):
field = {
'name': result_vars_mapping.get(var),
'type': infer_table_schema_type_from_rdf_term(
homogeneous_types[var].get('type'),
homogeneous_types[var].get('datatype')
)}
if 'datatype' in homogeneous_types.get(var):
field['rdfType'] = homogeneous_types[var].get('datatype')
term_metadata = (result_metadata[index]
if result_metadata != [] else {})
if 'description' in term_metadata:
field['description'] = term_metadata['description']
fields.append(field)
return _sanitize_schema({'fields': fields})
elif 'boolean' in sparql_results_json:
# ASK query results
return {'fields': [{'name': 'boolean', 'type': 'boolean'}]}
else:
warn('Unable to infer table schema from empty query results')
return None
|
python
|
{
"resource": ""
}
|
q12655
|
order_columns_in_row
|
train
|
def order_columns_in_row(fields, unordered_row):
"""Ensure columns appear in the same order for every row in table
:param fields:
:param unordered_row:
"""
fields_idx = {f: pos for pos, f in enumerate(fields)}
return OrderedDict(sorted(unordered_row.items(),
key=lambda i: fields_idx[i[0]]))
|
python
|
{
"resource": ""
}
|
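A small demonstration of the reordering above: rows parsed from JSON may present keys in arbitrary order, and this maps them back to the schema's field order.

```python
from collections import OrderedDict

# Standalone copy of order_columns_in_row with a quick demonstration.
def order_columns_in_row(fields, unordered_row):
    fields_idx = {f: pos for pos, f in enumerate(fields)}
    return OrderedDict(sorted(unordered_row.items(),
                              key=lambda i: fields_idx[i[0]]))

fields = ['id', 'name', 'score']
row = {'score': 9.5, 'id': 1, 'name': 'ada'}
print(order_columns_in_row(fields, row))
# OrderedDict([('id', 1), ('name', 'ada'), ('score', 9.5)])
```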
q12656
|
_get_types_from_sample
|
train
|
def _get_types_from_sample(result_vars, sparql_results_json):
"""Return types if homogenous within sample
Compare up to 10 rows of results to determine homogeneity.
DESCRIBE and CONSTRUCT queries, for example,
:param result_vars:
:param sparql_results_json:
"""
total_bindings = len(sparql_results_json['results']['bindings'])
homogeneous_types = {}
for result_var in result_vars:
var_types = set()
var_datatypes = set()
for i in range(0, min(total_bindings, 10)):
binding = sparql_results_json['results']['bindings'][i]
rdf_term = binding.get(result_var)
if rdf_term is not None: # skip missing values
var_types.add(rdf_term.get('type'))
var_datatypes.add(rdf_term.get('datatype'))
if len(var_types) > 1 or len(var_datatypes) > 1:
return None # Heterogeneous types
else:
homogeneous_types[result_var] = {
'type': var_types.pop() if var_types else None,
'datatype': var_datatypes.pop() if var_datatypes else None
}
return homogeneous_types
|
python
|
{
"resource": ""
}
|
q12657
|
FileConfig.save
|
train
|
def save(self):
"""Persist config changes"""
with open(self._config_file_path, 'w') as file:
self._config_parser.write(file)
|
python
|
{
"resource": ""
}
|
q12658
|
ChainedConfig._first_not_none
|
train
|
def _first_not_none(seq, supplier_func):
"""Applies supplier_func to each element in seq, returns 1st not None
:param seq: Sequence of object
:type seq: iterable
:param supplier_func: Function that extracts the desired value from
elements in seq
:type supplier_func: function
"""
for i in seq:
obj = supplier_func(i)
if obj is not None:
return obj
return None
|
python
|
{
"resource": ""
}
|
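A sketch of how the chained lookup above resolves a setting: the first config object in the chain that yields a non-None value wins. The config objects here are hypothetical stand-ins for the real chained configs.

```python
# Standalone copy of _first_not_none with hypothetical config objects.
def _first_not_none(seq, supplier_func):
    for i in seq:
        obj = supplier_func(i)
        if obj is not None:
            return obj
    return None

cli_config = {'auth_token': None}
env_config = {'auth_token': 'abc123'}
file_config = {'auth_token': 'from-file'}

token = _first_not_none([cli_config, env_config, file_config],
                        lambda cfg: cfg.get('auth_token'))
print(token)  # 'abc123' -- env_config is the first to supply a value
```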
q12659
|
cli
|
train
|
def cli(ctx, profile):
"""dw commands support working with multiple data.world accounts
\b
Use a different <profile> value for each account.
In the absence of a <profile>, 'default' will be used.
"""
if ctx.obj is None:
ctx.obj = {}
ctx.obj['profile'] = profile
pass
|
python
|
{
"resource": ""
}
|
q12660
|
configure
|
train
|
def configure(obj, token):
"""Use this command to configure API tokens
"""
config = obj.get('config') or FileConfig(obj['profile'])
config.auth_token = token
config.save()
|
python
|
{
"resource": ""
}
|
q12661
|
FileSourceCreateOrUpdateRequest.request_entity
|
train
|
def request_entity(self, request_entity):
"""
Sets the request_entity of this FileSourceCreateOrUpdateRequest.
:param request_entity: The request_entity of this FileSourceCreateOrUpdateRequest.
:type: str
"""
if request_entity is not None and len(request_entity) > 10000:
raise ValueError("Invalid value for `request_entity`, length must be less than or equal to `10000`")
self._request_entity = request_entity
|
python
|
{
"resource": ""
}
|
q12662
|
InsightPutRequest.title
|
train
|
def title(self, title):
"""
Sets the title of this InsightPutRequest.
Insight title.
:param title: The title of this InsightPutRequest.
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`")
if title is not None and len(title) > 128:
raise ValueError("Invalid value for `title`, length must be less than or equal to `128`")
if title is not None and len(title) < 1:
raise ValueError("Invalid value for `title`, length must be greater than or equal to `1`")
self._title = title
|
python
|
{
"resource": ""
}
|
q12663
|
RestApiClient.get_dataset
|
train
|
def get_dataset(self, dataset_key):
"""Retrieve an existing dataset definition
    This method retrieves metadata about an existing dataset.
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:returns: Dataset definition, with all attributes
:rtype: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> intro_dataset = api_client.get_dataset(
... 'jonloyens/an-intro-to-dataworld-dataset') # doctest: +SKIP
>>> intro_dataset['title'] # doctest: +SKIP
'An Intro to data.world Dataset'
"""
try:
return self._datasets_api.get_dataset(
*(parse_dataset_key(dataset_key))).to_dict()
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12664
|
RestApiClient.create_dataset
|
train
|
def create_dataset(self, owner_id, **kwargs):
"""Create a new dataset
:param owner_id: Username of the owner of the new dataset
:type owner_id: str
:param title: Dataset title (will be used to generate dataset id on
creation)
:type title: str
:param description: Dataset description
:type description: str, optional
:param summary: Dataset summary markdown
:type summary: str, optional
:param tags: Dataset tags
:type tags: list, optional
:param license: Dataset license
:type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY',
'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'}
:param visibility: Dataset visibility
:type visibility: {'OPEN', 'PRIVATE'}
:param files: File name as dict, source URLs, description and labels()
as properties
:type files: dict, optional
*Description and labels are optional*
:returns: Newly created dataset key
:rtype: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> url = 'http://www.acme.inc/example.csv'
>>> api_client.create_dataset(
... 'username', title='Test dataset', visibility='PRIVATE',
... license='Public Domain',
... files={'dataset.csv':{'url': url}}) # doctest: +SKIP
"""
request = self.__build_dataset_obj(
lambda: _swagger.DatasetCreateRequest(
title=kwargs.get('title'),
visibility=kwargs.get('visibility')),
lambda name, url, expand_archive, description, labels:
_swagger.FileCreateRequest(
name=name,
source=_swagger.FileSourceCreateRequest(
url=url,
expand_archive=expand_archive),
description=description,
labels=labels),
kwargs)
try:
(_, _, headers) = self._datasets_api.create_dataset_with_http_info(
owner_id, request, _return_http_data_only=False)
if 'Location' in headers:
return headers['Location']
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12665
|
RestApiClient.replace_dataset
|
train
|
def replace_dataset(self, dataset_key, **kwargs):
"""Replace an existing dataset
*This method will completely overwrite an existing dataset.*
:param description: Dataset description
:type description: str, optional
:param summary: Dataset summary markdown
:type summary: str, optional
:param tags: Dataset tags
:type tags: list, optional
:param license: Dataset license
:type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY',
'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'}
:param visibility: Dataset visibility
:type visibility: {'OPEN', 'PRIVATE'}
:param files: File names and source URLs to add or update
:type files: dict, optional
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.replace_dataset(
... 'username/test-dataset',
... visibility='PRIVATE', license='Public Domain',
... description='A better description') # doctest: +SKIP
"""
request = self.__build_dataset_obj(
lambda: _swagger.DatasetPutRequest(
title=kwargs.get('title'),
visibility=kwargs.get('visibility')
),
lambda name, url, expand_archive, description, labels:
_swagger.FileCreateRequest(
name=name,
source=_swagger.FileSourceCreateRequest(
url=url,
expand_archive=expand_archive),
description=description,
labels=labels),
kwargs)
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._datasets_api.replace_dataset(owner_id, dataset_id, request)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12666
|
RestApiClient.delete_dataset
|
train
|
def delete_dataset(self, dataset_key):
"""Deletes a dataset and all associated data
    :param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.delete_dataset(
... 'username/dataset') # doctest: +SKIP
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._datasets_api.delete_dataset(owner_id, dataset_id)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12667
|
RestApiClient.add_files_via_url
|
train
|
def add_files_via_url(self, dataset_key, files={}):
"""Add or update dataset files linked to source URLs
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param files: Dict containing the name of files and metadata
Uses file name as a dict containing File description, labels and
source URLs to add or update (Default value = {})
*description and labels are optional.*
:type files: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> url = 'http://www.acme.inc/example.csv'
>>> api_client = dw.api_client()
>>> api_client.add_files_via_url(
... 'username/test-dataset',
... {'example.csv': {
... 'url': url,
... 'labels': ['raw data'],
... 'description': 'file description'}}) # doctest: +SKIP
"""
file_requests = [_swagger.FileCreateOrUpdateRequest(
name=file_name,
source=_swagger.FileSourceCreateOrUpdateRequest(
url=file_info['url'],
expand_archive=file_info.get('expand_archive',
False)),
description=file_info.get('description'),
labels=file_info.get('labels'),
) for file_name, file_info in files.items()]
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._datasets_api.add_files_by_source(
owner_id, dataset_id,
_swagger.FileBatchUpdateRequest(files=file_requests))
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12668
|
RestApiClient.sync_files
|
train
|
def sync_files(self, dataset_key):
"""Trigger synchronization process to update all dataset files linked to
source URLs.
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.sync_files('username/test-dataset') # doctest: +SKIP
"""
try:
self._datasets_api.sync(*(parse_dataset_key(dataset_key)))
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12669
|
RestApiClient.upload_files
|
train
|
def upload_files(self, dataset_key, files, files_metadata={}, **kwargs):
"""Upload dataset files
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param files: The list of names/paths for files stored in the
local filesystem
:type files: list of str
:param expand_archives: Boolean value to indicate files should be
expanded upon upload
    :type expand_archives: bool, optional
:param files_metadata: Dict containing the name of files and metadata
Uses file name as a dict containing File description, labels and
source URLs to add or update
:type files_metadata: dict optional
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.upload_files(
... 'username/test-dataset',
... ['/my/local/example.csv']) # doctest: +SKIP
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._uploads_api.upload_files(owner_id, dataset_id, files,
**kwargs)
if files_metadata:
self.update_dataset(dataset_key, files=files_metadata)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12670
|
RestApiClient.upload_file
|
train
|
def upload_file(self, dataset_key, name, file_metadata={}, **kwargs):
"""Upload one file to a dataset
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param name: Name/path for files stored in the local filesystem
:type name: str
:param expand_archives: Boolean value to indicate files should be
expanded upon upload
    :type expand_archives: bool, optional
    :param file_metadata: Dict containing the name of files and metadata
Uses file name as a dict containing File description, labels and
source URLs to add or update
:type files_metadata: dict optional
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.upload_file(
... 'username/test-dataset',
... 'example.csv') # doctest: +SKIP
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._uploads_api.upload_file(owner_id, dataset_id, name, **kwargs)
if file_metadata:
self.update_dataset(dataset_key, files=file_metadata)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12671
|
RestApiClient.download_datapackage
|
train
|
def download_datapackage(self, dataset_key, dest_dir):
"""Download and unzip a dataset's datapackage
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param dest_dir: Directory under which datapackage should be saved
:type dest_dir: str or path
:returns: Location of the datapackage descriptor (datapackage.json) in
the local filesystem
:rtype: path
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> datapackage_descriptor = api_client.download_datapackage(
... 'jonloyens/an-intro-to-dataworld-dataset',
... '/tmp/test') # doctest: +SKIP
>>> datapackage_descriptor # doctest: +SKIP
'/tmp/test/datapackage.json'
"""
if path.isdir(dest_dir):
raise ValueError('dest_dir must be a new directory, '
'but {} already exists'.format(dest_dir))
owner_id, dataset_id = parse_dataset_key(dataset_key)
url = "{0}://{1}/datapackage/{2}/{3}".format(
self._protocol, self._download_host, owner_id, dataset_id)
headers = {
'User-Agent': _user_agent(),
'Authorization': 'Bearer {0}'.format(self._config.auth_token)
}
try:
response = requests.get(url, headers=headers, stream=True)
response.raise_for_status()
except requests.RequestException as e:
raise RestApiError(cause=e)
unzip_dir = path.join(self._config.tmp_dir, str(uuid.uuid4()))
os.makedirs(unzip_dir)
zip_file = path.join(unzip_dir, 'dataset.zip')
with open(zip_file, 'wb') as f:
for data in response.iter_content(chunk_size=4096):
f.write(data)
zip_obj = zipfile.ZipFile(zip_file)
zip_obj.extractall(path=unzip_dir)
# Find where datapackage.json is within expanded files
unzipped_descriptor = glob.glob(
'{}/**/datapackage.json'.format(unzip_dir))
if not unzipped_descriptor:
raise RuntimeError(
'Zip file did not contain a datapackage manifest.')
unzipped_dir = path.dirname(unzipped_descriptor[0])
shutil.move(unzipped_dir, dest_dir)
shutil.rmtree(unzip_dir, ignore_errors=True)
return path.join(dest_dir, 'datapackage.json')
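
# --- Hedged usage sketch (not part of the original source) -------------------
# The destination directory must not exist yet; the returned path points at
# datapackage.json, which can be loaded to inspect the dataset's resources.
# The dataset key and paths are illustrative.
def _download_datapackage_usage_sketch():
    import json
    import datadotworld as dw
    api_client = dw.api_client()
    descriptor_path = api_client.download_datapackage(
        'jonloyens/an-intro-to-dataworld-dataset', '/tmp/intro-dataset')
    with open(descriptor_path) as descriptor_file:
        datapackage = json.load(descriptor_file)
    return [resource['name'] for resource in datapackage.get('resources', [])]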
|
python
|
{
"resource": ""
}
|
q12672
|
RestApiClient.get_user_data
|
train
|
def get_user_data(self):
"""Retrieve data for authenticated user
:returns: User data, with all attributes
:rtype: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> user_data = api_client.get_user_data() # doctest: +SKIP
        >>> user_data['display_name'] # doctest: +SKIP
'Name User'
"""
try:
return self._user_api.get_user_data().to_dict()
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12673
|
RestApiClient.fetch_contributing_projects
|
train
|
def fetch_contributing_projects(self, **kwargs):
"""Fetch projects that the currently authenticated user has access to
:returns: Authenticated user projects
:rtype: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
        >>> user_projects = (
        ...     api_client.fetch_contributing_projects()) # doctest: +SKIP
        >>> user_projects # doctest: +SKIP
        {'count': 0, 'records': [], 'next_page_token': None}
"""
try:
return self._user_api.fetch_contributing_projects(
**kwargs).to_dict()
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12674
|
RestApiClient.sql
|
train
|
def sql(self, dataset_key, query, desired_mimetype='application/json',
**kwargs):
"""Executes SQL queries against a dataset via POST
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param query: SQL query
:type query: str
        :param include_table_schema: Flag indicating whether to include the
            table schema in the response
        :type include_table_schema: bool
:returns: file object that can be used in file parsers and
data handling modules.
:rtype: file-like object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.sql('username/test-dataset', 'query') # doctest: +SKIP
"""
api_client = self._build_api_client(
default_mimetype_header_accept=desired_mimetype)
sql_api = kwargs.get('sql_api_mock', _swagger.SqlApi(api_client))
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
response = sql_api.sql_post(
owner_id, dataset_id, query, _preload_content=False, **kwargs)
return six.BytesIO(response.data)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
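
# --- Hedged usage sketch (not part of the original source) -------------------
# With the default Accept header (application/json) the returned file-like
# object holds a JSON document, so it can be handed to ``json.load``.
# The dataset key and query are illustrative.
def _sql_usage_sketch():
    import json
    import datadotworld as dw
    api_client = dw.api_client()
    results = api_client.sql('username/test-dataset',
                             'SELECT * FROM example_table')
    return json.load(results)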
|
python
|
{
"resource": ""
}
|
q12675
|
RestApiClient.sparql
|
train
|
def sparql(self, dataset_key, query,
desired_mimetype='application/sparql-results+json', **kwargs):
"""Executes SPARQL queries against a dataset via POST
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param query: SPARQL query
:type query: str
:returns: file object that can be used in file parsers and
data handling modules.
        :rtype: file-like object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
        >>> api_client.sparql('username/test-dataset',
        ...                   query) # doctest: +SKIP
"""
api_client = self._build_api_client(
default_mimetype_header_accept=desired_mimetype)
sparql_api = kwargs.get('sparql_api_mock',
_swagger.SparqlApi(api_client))
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
response = sparql_api.sparql_post(
owner_id, dataset_id, query, _preload_content=False, **kwargs)
return six.BytesIO(response.data)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12676
|
RestApiClient.download_dataset
|
train
|
def download_dataset(self, dataset_key):
"""Return a .zip containing all files within the dataset as uploaded.
        :param dataset_key: Dataset identifier, in the form of owner/id
        :type dataset_key: str
        :returns: .zip file containing the files within the dataset
:rtype: file object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.download_dataset(
... 'username/test-dataset') # doctest: +SKIP
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
return self._download_api.download_dataset(owner_id, dataset_id)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12677
|
RestApiClient.append_records
|
train
|
def append_records(self, dataset_key, stream_id, body):
"""Append records to a stream.
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param stream_id: Stream unique identifier.
:type stream_id: str
:param body: Object body
:type body: obj
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.append_records('username/test-dataset','streamId',
... {'content':'content'}) # doctest: +SKIP
"""
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
return self._streams_api.append_records(owner_id, dataset_id,
stream_id, body)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12678
|
RestApiClient.get_project
|
train
|
def get_project(self, project_key):
"""Retrieve an existing project
This method retrieves metadata about an existing project
:param project_key: Project identifier, in the form of owner/id
:type project_key: str
:returns: Project definition, with all attributes
:rtype: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> intro_project = api_client.get_project(
... 'jonloyens/'
... 'an-example-project-that-shows-what-to-put-in-data-world'
... ) # doctest: +SKIP
>>> intro_project['title'] # doctest: +SKIP
'An Example Project that Shows What To Put in data.world'
"""
try:
owner_id, project_id = parse_dataset_key(project_key)
return self._projects_api.get_project(owner_id,
project_id).to_dict()
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12679
|
RestApiClient.update_project
|
train
|
def update_project(self, project_key, **kwargs):
"""Update an existing project
:param project_key: Username and unique identifier of the creator of a
project in the form of owner/id.
:type project_key: str
:param title: Project title
:type title: str
:param objective: Short project objective.
:type objective: str, optional
:param summary: Long-form project summary.
:type summary: str, optional
        :param tags: Project tags. Letters, numbers and spaces
:type tags: list, optional
:param license: Project license
:type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY',
'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'}
:param visibility: Project visibility
:type visibility: {'OPEN', 'PRIVATE'}
        :param files: Dict mapping file names to dicts with source URL,
            description and labels as properties
        :type files: dict, optional
            *Description and labels are optional*
:param linked_datasets: Initial set of linked datasets.
:type linked_datasets: list of object, optional
:returns: message object
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.update_project(
... 'username/test-project',
... tags=['demo', 'datadotworld']) # doctest: +SKIP
"""
request = self.__build_project_obj(
lambda: _swagger.ProjectPatchRequest(),
lambda name, url, description, labels:
_swagger.FileCreateOrUpdateRequest(
name=name,
source=_swagger.FileSourceCreateOrUpdateRequest(url=url),
description=description,
labels=labels),
kwargs)
owner_id, project_id = parse_dataset_key(project_key)
try:
return self._projects_api.patch_project(owner_id,
project_id,
body=request)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12680
|
RestApiClient.replace_project
|
train
|
def replace_project(self, project_key, **kwargs):
"""Replace an existing Project
*Create a project with a given id or completely rewrite the project,
including any previously added files or linked datasets, if one already
exists with the given id.*
:param project_key: Username and unique identifier of the creator of a
project in the form of owner/id.
:type project_key: str
:param title: Project title
:type title: str
:param objective: Short project objective.
:type objective: str, optional
:param summary: Long-form project summary.
:type summary: str, optional
        :param tags: Project tags. Letters, numbers and spaces
:type tags: list, optional
:param license: Project license
:type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY',
'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'}
:param visibility: Project visibility
:type visibility: {'OPEN', 'PRIVATE'}
        :param files: Dict mapping file names to dicts with source URL,
            description and labels as properties
        :type files: dict, optional
            *Description and labels are optional*
:param linked_datasets: Initial set of linked datasets.
:type linked_datasets: list of object, optional
:returns: project object
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.replace_project(
... 'username/test-project',
... visibility='PRIVATE',
... objective='A better objective',
... title='Replace project') # doctest: +SKIP
"""
request = self.__build_project_obj(
lambda: _swagger.ProjectCreateRequest(
title=kwargs.get('title'),
visibility=kwargs.get('visibility')
),
lambda name, url, description, labels:
_swagger.FileCreateRequest(
name=name,
source=_swagger.FileSourceCreateRequest(url=url),
description=description,
labels=labels),
kwargs)
try:
project_owner_id, project_id = parse_dataset_key(project_key)
self._projects_api.replace_project(project_owner_id,
project_id,
body=request)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12681
|
RestApiClient.add_linked_dataset
|
train
|
def add_linked_dataset(self, project_key, dataset_key):
"""Link project to an existing dataset
This method links a dataset to project
:param project_key: Project identifier, in the form of owner/id
:type project_key: str
:param dataset_key: Dataset identifier, in the form of owner/id
        :type dataset_key: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> linked_dataset = api_client.add_linked_dataset(
... 'username/test-project',
... 'username/test-dataset') # doctest: +SKIP
"""
try:
project_owner_id, project_id = parse_dataset_key(project_key)
dataset_owner_id, dataset_id = parse_dataset_key(dataset_key)
self._projects_api.add_linked_dataset(project_owner_id,
project_id,
dataset_owner_id,
dataset_id)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12682
|
RestApiClient.get_insight
|
train
|
def get_insight(self, project_key, insight_id, **kwargs):
"""Retrieve an insight
:param project_key: Project identifier, in the form of
projectOwner/projectid
:type project_key: str
:param insight_id: Insight unique identifier.
:type insight_id: str
:returns: Insight definition, with all attributes
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> insight = api_client.get_insight(
... 'jonloyens/'
... 'an-example-project-that-shows-what-to-put-in-data-world',
... 'c2538b0c-c200-474c-9631-5ff4f13026eb') # doctest: +SKIP
>>> insight['title'] # doctest: +SKIP
'Coast Guard Lives Saved by Fiscal Year'
"""
try:
project_owner, project_id = parse_dataset_key(project_key)
return self._insights_api.get_insight(project_owner,
project_id,
insight_id,
**kwargs).to_dict()
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12683
|
RestApiClient.get_insights_for_project
|
train
|
def get_insights_for_project(self, project_key, **kwargs):
"""Get insights for a project.
:param project_key: Project identifier, in the form of
projectOwner/projectid
:type project_key: str
:returns: Insight results
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> insights = api_client.get_insights_for_project(
... 'jonloyens/'
... 'an-example-project-that-shows-what-to-put-in-data-world'
... ) # doctest: +SKIP
"""
try:
project_owner, project_id = parse_dataset_key(project_key)
return self._insights_api.get_insights_for_project(project_owner,
project_id,
**kwargs)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12684
|
RestApiClient.create_insight
|
train
|
def create_insight(self, project_key, **kwargs):
"""Create a new insight
:param project_key: Project identifier, in the form of
projectOwner/projectid
:type project_key: str
:param title: Insight title
:type title: str
:param description: Insight description.
:type description: str, optional
:param image_url: If image-based, the URL of the image
:type image_url: str
:param embed_url: If embed-based, the embeddable URL
:type embed_url: str
:param source_link: Permalink to source code or platform this insight
was generated with. Allows others to replicate the steps originally
used to produce the insight.
:type source_link: str, optional
:param data_source_links: One or more permalinks to the data sources
used to generate this insight. Allows others to access the data
originally used to produce the insight.
:type data_source_links: array
:returns: Insight with message and uri object
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.create_insight(
... 'projectOwner/projectid', title='Test insight',
... image_url='url') # doctest: +SKIP
"""
request = self.__build_insight_obj(
lambda: _swagger.InsightCreateRequest(
title=kwargs.get('title'),
body=_swagger.InsightBody(
image_url=kwargs.get('image_url'),
embed_url=kwargs.get('embed_url'),
markdown_body=kwargs.get('markdown_body')
)
), kwargs)
project_owner, project_id = parse_dataset_key(project_key)
try:
(_, _, headers) = self._insights_api.create_insight_with_http_info(
project_owner,
project_id,
body=request,
_return_http_data_only=False)
if 'Location' in headers:
return headers['Location']
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
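
# --- Hedged usage sketch (not part of the original source) -------------------
# When the API responds with a Location header, create_insight returns it;
# that value is the URI of the newly created insight. Keys and the image URL
# are illustrative.
def _create_insight_usage_sketch():
    import datadotworld as dw
    api_client = dw.api_client()
    insight_uri = api_client.create_insight(
        'username/test-project',
        title='Test insight',
        image_url='https://example.com/chart.png')
    return insight_uri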
|
python
|
{
"resource": ""
}
|
q12685
|
RestApiClient.replace_insight
|
train
|
def replace_insight(self, project_key, insight_id, **kwargs):
"""Replace an insight.
        :param project_key: Project identifier, in the form of
            projectOwner/projectid
:type project_key: str
:param insight_id: Insight unique identifier.
:type insight_id: str
:param title: Insight title
:type title: str
:param description: Insight description.
:type description: str, optional
:param image_url: If image-based, the URL of the image
:type image_url: str
:param embed_url: If embed-based, the embeddable URL
:type embed_url: str
:param source_link: Permalink to source code or platform this insight
was generated with. Allows others to replicate the steps originally
used to produce the insight.
:type source_link: str, optional
:param data_source_links: One or more permalinks to the data sources
used to generate this insight. Allows others to access the data
originally used to produce the insight.
:type data_source_links: array
:returns: message object
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.replace_insight(
... 'projectOwner/projectid',
... '1230-9324-3424242442',
... embed_url='url',
... title='Test insight') # doctest: +SKIP
"""
request = self.__build_insight_obj(
lambda: _swagger.InsightPutRequest(
title=kwargs.get('title'),
body=_swagger.InsightBody(
image_url=kwargs.get('image_url'),
embed_url=kwargs.get('embed_url'),
markdown_body=kwargs.get('markdown_body')
)
), kwargs)
project_owner, project_id = parse_dataset_key(project_key)
try:
self._insights_api.replace_insight(project_owner,
project_id,
insight_id,
body=request)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12686
|
RestApiClient.update_insight
|
train
|
def update_insight(self, project_key, insight_id, **kwargs):
"""Update an insight.
        *Note that only elements included in the request will be updated. All
        omitted elements will remain untouched.*
        :param project_key: Project identifier, in the form of
            projectOwner/projectid
:type project_key: str
:param insight_id: Insight unique identifier.
:type insight_id: str
:param title: Insight title
:type title: str
:param description: Insight description.
:type description: str, optional
:param image_url: If image-based, the URL of the image
:type image_url: str
:param embed_url: If embed-based, the embeddable URL
:type embed_url: str
:param source_link: Permalink to source code or platform this insight
was generated with. Allows others to replicate the steps originally
used to produce the insight.
:type source_link: str, optional
:param data_source_links: One or more permalinks to the data sources
used to generate this insight. Allows others to access the data
originally used to produce the insight.
:type data_source_links: array
:returns: message object
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
        >>> api_client.update_insight(
        ...     'username/test-project', 'insightid',
        ...     title='demo datadotworld') # doctest: +SKIP
"""
request = self.__build_insight_obj(
lambda: _swagger.InsightPatchRequest(), kwargs)
project_owner, project_id = parse_dataset_key(project_key)
try:
self._insights_api.update_insight(project_owner,
project_id,
insight_id, body=request)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12687
|
RestApiClient.delete_insight
|
train
|
def delete_insight(self, project_key, insight_id):
"""Delete an existing insight.
        :param project_key: Project identifier, in the form of
            projectOwner/projectId
        :type project_key: str
        :param insight_id: Insight unique identifier
        :type insight_id: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> del_insight = api_client.delete_insight(
... 'username/project', 'insightid') # doctest: +SKIP
"""
projectOwner, projectId = parse_dataset_key(project_key)
try:
self._insights_api.delete_insight(projectOwner,
projectId,
insight_id)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
python
|
{
"resource": ""
}
|
q12688
|
parse_dataset_key
|
train
|
def parse_dataset_key(dataset_key):
"""Parse a dataset URL or path and return the owner and the dataset id
:param dataset_key: Dataset key (in the form of owner/id) or dataset URL
:type dataset_key: str
:returns: User name of the dataset owner and ID of the dataset
:rtype: dataset_owner, dataset_id
    :raises ValueError: If the provided key does not comply with the expected pattern
Examples
--------
>>> from datadotworld import util
>>> util.parse_dataset_key(
... 'https://data.world/jonloyens/an-intro-to-datadotworld-dataset')
('jonloyens', 'an-intro-to-datadotworld-dataset')
>>> util.parse_dataset_key('jonloyens/an-intro-to-datadotworld-dataset')
('jonloyens', 'an-intro-to-datadotworld-dataset')
"""
match = re.match(DATASET_KEY_PATTERN, dataset_key)
if not match:
        raise ValueError('Invalid dataset key. Key must include user and '
                         'dataset names, separated by a slash '
                         '(i.e. user/dataset).')
return match.groups()
|
python
|
{
"resource": ""
}
|
q12689
|
LazyLoadedDict.from_keys
|
train
|
def from_keys(cls, keys, loader_func, type_hint=None):
"""Factory method for `LazyLoadedDict`
Accepts a ``loader_func`` that is to be applied to all ``keys``.
:param keys: List of keys to create the dictionary with
:type keys: iterable
:param loader_func: Function to be applied to all keys
:type loader_func: function
:param type_hint: Expected type of lazy loaded values.
Used by `LazyLoadedValue`. (Default value = None)
:type type_hint: str
:returns: A properly constructed lazy loaded dictionary
:rtype: LazyLoadedDict
"""
return cls({k: LazyLoadedValue(
lambda k=k: loader_func(k), type_hint=type_hint) for k in keys})
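
# Note on the ``lambda k=k`` default above: it binds each key at definition
# time. A plain ``lambda: loader_func(k)`` would late-bind ``k`` and every
# entry would end up loading the last key. Minimal standalone illustration:
def _late_binding_sketch():
    keys = ['a', 'b', 'c']
    late = {k: (lambda: k) for k in keys}        # every closure sees k == 'c'
    frozen = {k: (lambda k=k: k) for k in keys}  # each closure keeps its own k
    assert late['a']() == 'c'
    assert frozen['a']() == 'a'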
|
python
|
{
"resource": ""
}
|
q12690
|
FileCreateOrUpdateRequest.name
|
train
|
def name(self, name):
"""
Sets the name of this FileCreateOrUpdateRequest.
File name. Should include type extension always when possible. Must not include slashes.
:param name: The name of this FileCreateOrUpdateRequest.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
if name is not None and len(name) > 128:
raise ValueError("Invalid value for `name`, length must be less than or equal to `128`")
if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")
if name is not None and not re.search('^[^\/]+$', name):
raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[^\/]+$/`")
self._name = name
|
python
|
{
"resource": ""
}
|
q12691
|
FileCreateOrUpdateRequest.labels
|
train
|
def labels(self, labels):
"""
Sets the labels of this FileCreateOrUpdateRequest.
File labels.
:param labels: The labels of this FileCreateOrUpdateRequest.
:type: list[str]
"""
allowed_values = ["raw data", "documentation", "visualization", "clean data", "script", "report"]
if not set(labels).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `labels` [{0}], must be a subset of [{1}]"
.format(", ".join(map(str, set(labels)-set(allowed_values))),
", ".join(map(str, allowed_values)))
)
self._labels = labels
|
python
|
{
"resource": ""
}
|
q12692
|
WebCredentials.user
|
train
|
def user(self, user):
"""
Sets the user of this WebCredentials.
The name of the account to login to.
:param user: The user of this WebCredentials.
:type: str
"""
if user is None:
raise ValueError("Invalid value for `user`, must not be `None`")
if user is not None and len(user) > 1024:
raise ValueError("Invalid value for `user`, length must be less than or equal to `1024`")
self._user = user
|
python
|
{
"resource": ""
}
|
q12693
|
OauthTokenReference.owner
|
train
|
def owner(self, owner):
"""
Sets the owner of this OauthTokenReference.
User name of the owner of the OAuth token within data.world.
:param owner: The owner of this OauthTokenReference.
:type: str
"""
if owner is None:
raise ValueError("Invalid value for `owner`, must not be `None`")
if owner is not None and len(owner) > 31:
raise ValueError("Invalid value for `owner`, length must be less than or equal to `31`")
if owner is not None and len(owner) < 3:
raise ValueError("Invalid value for `owner`, length must be greater than or equal to `3`")
if owner is not None and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner):
raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
self._owner = owner
|
python
|
{
"resource": ""
}
|
q12694
|
OauthTokenReference.site
|
train
|
def site(self, site):
"""
Sets the site of this OauthTokenReference.
:param site: The site of this OauthTokenReference.
:type: str
"""
if site is None:
raise ValueError("Invalid value for `site`, must not be `None`")
if site is not None and len(site) > 255:
raise ValueError("Invalid value for `site`, length must be less than or equal to `255`")
if site is not None and len(site) < 3:
raise ValueError("Invalid value for `site`, length must be greater than or equal to `3`")
if site is not None and not re.search('(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\\.)+[a-z]{2,}(?:@[a-z0-9](?:[-.](?=[a-z0-9])|[a-z0-9]){0,29})?', site):
raise ValueError("Invalid value for `site`, must be a follow pattern or equal to `/(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?\\.)+[a-z]{2,}(?:@[a-z0-9](?:[-.](?=[a-z0-9])|[a-z0-9]){0,29})?/`")
self._site = site
|
python
|
{
"resource": ""
}
|
q12695
|
FileSourceCreateRequest.url
|
train
|
def url(self, url):
"""
Sets the url of this FileSourceCreateRequest.
        Source URL of file. Must be an http or https URL.
:param url: The url of this FileSourceCreateRequest.
:type: str
"""
if url is None:
raise ValueError("Invalid value for `url`, must not be `None`")
if url is not None and len(url) > 4096:
raise ValueError("Invalid value for `url`, length must be less than or equal to `4096`")
if url is not None and len(url) < 1:
raise ValueError("Invalid value for `url`, length must be greater than or equal to `1`")
if url is not None and not re.search('^https?:.*', url):
raise ValueError("Invalid value for `url`, must be a follow pattern or equal to `/^https?:.*/`")
self._url = url
|
python
|
{
"resource": ""
}
|
q12696
|
LinkedDatasetCreateOrUpdateRequest.owner
|
train
|
def owner(self, owner):
"""
Sets the owner of this LinkedDatasetCreateOrUpdateRequest.
User name and unique identifier of the creator of the dataset.
:param owner: The owner of this LinkedDatasetCreateOrUpdateRequest.
:type: str
"""
if owner is None:
raise ValueError("Invalid value for `owner`, must not be `None`")
if owner is not None and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner):
raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
self._owner = owner
|
python
|
{
"resource": ""
}
|
q12697
|
RemoteFile.read
|
train
|
def read(self):
"""read the contents of the file that's been opened in read mode"""
if 'r' == self._mode:
return self._read_response.text
elif 'rb' == self._mode:
return self._read_response.content
else:
raise IOError("File not opened in read mode.")
|
python
|
{
"resource": ""
}
|
q12698
|
RemoteFile._open_for_read
|
train
|
def _open_for_read(self):
"""open the file in read mode"""
ownerid, datasetid = parse_dataset_key(self._dataset_key)
response = requests.get(
'{}/file_download/{}/{}/{}'.format(
self._query_host, ownerid, datasetid, self._file_name),
headers={
'User-Agent': self._user_agent,
'Authorization': 'Bearer {}'.format(
self._config.auth_token)
}, stream=True)
try:
response.raise_for_status()
except Exception as e:
raise RestApiError(cause=e)
self._read_response = response
|
python
|
{
"resource": ""
}
|
q12699
|
RemoteFile._open_for_write
|
train
|
def _open_for_write(self):
"""open the file in write mode"""
def put_request(body):
"""
:param body:
"""
ownerid, datasetid = parse_dataset_key(self._dataset_key)
response = requests.put(
"{}/uploads/{}/{}/files/{}".format(
self._api_host, ownerid, datasetid, self._file_name),
data=body,
headers={
'User-Agent': self._user_agent,
'Authorization': 'Bearer {}'.format(
self._config.auth_token)
})
self._response_queue.put(response)
body = iter(self._queue.get, self._sentinel)
self._thread = Thread(target=put_request, args=(body,))
self._thread.start()
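
# How the streaming body works above: ``iter(self._queue.get, self._sentinel)``
# turns the queue into an iterator that keeps yielding chunks written by the
# caller until the sentinel object is dequeued, which ends the PUT body.
# Minimal standalone illustration (no network involved; names are illustrative):
def _sentinel_iter_sketch():
    from queue import Queue
    chunks = Queue()
    sentinel = object()
    for piece in (b'hello ', b'world', sentinel):
        chunks.put(piece)
    assert b''.join(iter(chunks.get, sentinel)) == b'hello world'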
|
python
|
{
"resource": ""
}
|