code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
''' Delete the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. ''' pipeline = self.redis_connection.pipeline() pipeline.delete(leaderboard_name) pipeline.delete(self._member_data_key(leaderboard_name)) pipeline.delete(self._ties_leaderboard_key(leaderboard_name)) pipeline.execute()
def delete_leaderboard_named(self, leaderboard_name)
Delete the named leaderboard. @param leaderboard_name [String] Name of the leaderboard.
3.349273
2.993074
1.119008
''' Change the score for a member in the named leaderboard by a delta which can be positive or negative. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @param delta [float] Score change. @param member_data [String] Optional member data. ''' previous_score = self.score_for(member) new_score = (previous_score or 0) + delta total_members_at_previous_score = [] if previous_score is not None: total_members_at_previous_score = self.redis_connection.zrevrangebyscore(leaderboard_name, previous_score, previous_score) pipeline = self.redis_connection.pipeline() if isinstance(self.redis_connection, Redis): pipeline.zadd(leaderboard_name, member, new_score) pipeline.zadd(self._ties_leaderboard_key(leaderboard_name), str(float(new_score)), new_score) else: pipeline.zadd(leaderboard_name, new_score, member) pipeline.zadd(self._ties_leaderboard_key(leaderboard_name), new_score, str(float(new_score))) if member_data: pipeline.hset( self._member_data_key(leaderboard_name), member, member_data) pipeline.execute() if len(total_members_at_previous_score) == 1: self.redis_connection.zrem(self._ties_leaderboard_key(leaderboard_name), str(float(previous_score)))
def change_score_for_member_in(self, leaderboard_name, member, delta, member_data=None)
Change the score for a member in the named leaderboard by a delta which can be positive or negative. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @param delta [float] Score change. @param member_data [String] Optional member data.
2.268761
1.948456
1.164389
''' Rank a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member data. ''' member_score = None or self.redis_connection.zscore(leaderboard_name, member) can_delete_score = member_score is not None and\ (len(self.members_from_score_range_in(leaderboard_name, member_score, member_score)) == 1) and\ member_score != score pipeline = self.redis_connection.pipeline() if isinstance(self.redis_connection, Redis): pipeline.zadd(leaderboard_name, member, score) pipeline.zadd(self._ties_leaderboard_key(leaderboard_name), str(float(score)), score) else: pipeline.zadd(leaderboard_name, score, member) pipeline.zadd(self._ties_leaderboard_key(leaderboard_name), score, str(float(score))) if can_delete_score: pipeline.zrem(self._ties_leaderboard_key(leaderboard_name), str(float(member_score))) if member_data: pipeline.hset( self._member_data_key(leaderboard_name), member, member_data) pipeline.execute()
def rank_member_in( self, leaderboard_name, member, score, member_data=None)
Rank a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member data.
2.525027
2.249487
1.12249
''' Rank a member across multiple leaderboards. @param leaderboards [Array] Leaderboard names. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member data. ''' for leaderboard_name in leaderboards: self.rank_member_in(leaderboard_name, member, score, member_data)
def rank_member_across( self, leaderboards, member, score, member_data=None)
Rank a member across multiple leaderboards. @param leaderboards [Array] Leaderboard names. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member data.
3.139022
2.09241
1.500195
''' Rank an array of members in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param members_and_scores [Array] Variable list of members and scores. ''' for member, score in grouper(2, members_and_scores): self.rank_member_in(leaderboard_name, member, score)
def rank_members_in(self, leaderboard_name, members_and_scores)
Rank an array of members in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param members_and_scores [Array] Variable list of members and scores.
3.365628
2.115309
1.59108
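rank_members_in above depends on a grouper helper that pairs each member with the score following it; the helper is not part of this record, so here is a minimal sketch assuming the standard itertools pairing recipe (the name and argument order are taken from the call above):

from itertools import zip_longest

def grouper(n, iterable, fillvalue=None):
    # Collect items into fixed-length chunks, e.g. grouper(2, ['a', 1, 'b', 2]) -> ('a', 1), ('b', 2)
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

# list(grouper(2, ['alice', 10, 'bob', 20])) -> [('alice', 10), ('bob', 20)]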
''' Remove the optional member data for a given member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. ''' member_score = None or self.redis_connection.zscore( leaderboard_name, member) can_delete_score = member_score and len( self.members_from_score_range_in(leaderboard_name, member_score, member_score)) == 1 pipeline = self.redis_connection.pipeline() pipeline.zrem(leaderboard_name, member) if can_delete_score: pipeline.zrem(self._ties_leaderboard_key(leaderboard_name), str(float(member_score))) pipeline.hdel(self._member_data_key(leaderboard_name), member) pipeline.execute()
def remove_member_from(self, leaderboard_name, member)
Remove the optional member data for a given member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name.
3.770993
2.918847
1.291946
''' Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard. ''' member_score = self.score_for_in(leaderboard_name, member) if self.order == self.ASC: try: return self.redis_connection.zrank( self._ties_leaderboard_key(leaderboard_name), str(float(member_score))) + 1 except: return None else: try: return self.redis_connection.zrevrank( self._ties_leaderboard_key(leaderboard_name), str(float(member_score))) + 1 except: return None
def rank_for_in(self, leaderboard_name, member)
Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard.
2.725609
2.139411
1.273999
''' Remove members from the named leaderboard in a given score range. @param leaderboard_name [String] Name of the leaderboard. @param min_score [float] Minimum score. @param max_score [float] Maximum score. ''' pipeline = self.redis_connection.pipeline() pipeline.zremrangebyscore( leaderboard_name, min_score, max_score) pipeline.zremrangebyscore( self._ties_leaderboard_key(leaderboard_name), min_score, max_score) pipeline.execute()
def remove_members_in_score_range_in( self, leaderboard_name, min_score, max_score)
Remove members from the named leaderboard in a given score range. @param leaderboard_name [String] Name of the leaderboard. @param min_score [float] Minimum score. @param max_score [float] Maximum score.
2.550282
1.980798
1.287502
''' Expire the given leaderboard at a specific UNIX timestamp. Do not use this with leaderboards that utilize member data as there is no facility to cascade the expiration out to the keys for the member data. @param leaderboard_name [String] Name of the leaderboard. @param timestamp [int] UNIX timestamp at which the leaderboard will be expired. ''' pipeline = self.redis_connection.pipeline() pipeline.expireat(leaderboard_name, timestamp) pipeline.expireat( self._ties_leaderboard_key(leaderboard_name), timestamp) pipeline.expireat(self._member_data_key(leaderboard_name), timestamp) pipeline.execute()
def expire_leaderboard_at_for(self, leaderboard_name, timestamp)
Expire the given leaderboard at a specific UNIX timestamp. Do not use this with leaderboards that utilize member data as there is no facility to cascade the expiration out to the keys for the member data. @param leaderboard_name [String] Name of the leaderboard. @param timestamp [int] UNIX timestamp at which the leaderboard will be expired.
4.636444
1.940656
2.389111
''' Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard. ''' member_score = str(float(self.score_for_in(leaderboard_name, member))) if self.order == self.ASC: try: return self.redis_connection.zcount( leaderboard_name, '-inf', '(%s' % member_score) + 1 except: return None else: try: return self.redis_connection.zcount( leaderboard_name, '(%s' % member_score, '+inf') + 1 except: return None
def rank_for_in(self, leaderboard_name, member)
Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard.
2.869313
2.240997
1.280374
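The zcount-based rank_for_in above yields competition ("1224") ranking: a member's rank is one plus the number of members with a strictly better score. A minimal pure-Python illustration of that arithmetic, assuming a descending (DESC) leaderboard; competition_rank is a hypothetical helper, not part of the library:

def competition_rank(scores, member_score):
    # One plus the number of strictly higher scores, mirroring
    # zcount(leaderboard_name, '(score', '+inf') + 1 on a DESC leaderboard.
    return sum(1 for s in scores if s > member_score) + 1

scores = [100, 90, 90, 80]
[competition_rank(scores, s) for s in scores]  # -> [1, 2, 2, 4]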
''' Retrieve the score and rank for a member in the named leaderboard. @param leaderboard_name [String]Name of the leaderboard. @param member [String] Member name. @return the score and rank for a member in the named leaderboard as a Hash. ''' pipeline = self.redis_connection.pipeline() pipeline.zscore(leaderboard_name, member) if self.order == self.ASC: pipeline.zrank(leaderboard_name, member) else: pipeline.zrevrank(leaderboard_name, member) responses = pipeline.execute() if responses[0] is not None: responses[0] = float(responses[0]) if self.order == self.ASC: try: responses[1] = self.redis_connection.zcount( leaderboard_name, '-inf', "(%s" % str(float(responses[0]))) + 1 except: responses[1] = None else: try: responses[1] = self.redis_connection.zcount( leaderboard_name, "(%s" % str(float(responses[0])), '+inf') + 1 except: responses[1] = None return { self.MEMBER_KEY: member, self.SCORE_KEY: responses[0], self.RANK_KEY: responses[1] }
def score_and_rank_for_in(self, leaderboard_name, member)
Retrieve the score and rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the score and rank for a member in the named leaderboard as a Hash.
2.367538
1.877697
1.260873
if key in allowed: return True for pattern in allowed: if fnmatch(key, pattern): return True return False
def check_key(key, allowed)
Validate that the specified key is allowed according to the provided list of patterns.
3.683109
2.881013
1.278408
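check_key above treats each entry in allowed either as an exact key or as an fnmatch-style pattern; a brief usage sketch (the keys and patterns are illustrative only):

from fnmatch import fnmatch  # check_key relies on fnmatch for pattern matching

allowed = ["id", "virtualmachine*"]
check_key("id", allowed)                # True: exact match
check_key("virtualmachineid", allowed)  # True: matches the 'virtualmachine*' pattern
check_key("secret", allowed)            # False: no exact or pattern match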
if PY2 and isinstance(s, text_type): s = s.encode("utf-8") return quote(s, safe="*")
def cs_encode(s)
Encode URI component like CloudStack would do before signing. java.net.URLEncoder.encode(s).replace('+', '%20')
4.074514
4.607167
0.884386
for key, value in list(params.items()): if value is None: params.pop(key) continue if isinstance(value, (string_type, binary_type)): continue if isinstance(value, integer_types): params[key] = text_type(value) elif isinstance(value, (list, tuple, set, dict)): if not value: params.pop(key) else: if isinstance(value, dict): value = [value] if isinstance(value, set): value = list(value) if not isinstance(value[0], dict): params[key] = ",".join(value) else: params.pop(key) for index, val in enumerate(value): for name, v in val.items(): k = "%s[%d].%s" % (key, index, name) params[k] = text_type(v) else: raise ValueError(type(value))
def transform(params)
Transforms a heterogeneous map of params into a CloudStack-ready mapping of parameter to values. It handles lists and dicts. >>> p = {"a": 1, "b": "foo", "c": ["eggs", "spam"], "d": {"key": "value"}} >>> transform(p) >>> print(p) {'a': '1', 'b': 'foo', 'c': 'eggs,spam', 'd[0].key': 'value'}
2.262736
2.291422
0.987481
env_conf = dict(DEFAULT_CONFIG) for key in REQUIRED_CONFIG_KEYS.union(ALLOWED_CONFIG_KEYS): env_key = "CLOUDSTACK_{0}".format(key.upper()) value = os.getenv(env_key) if value: env_conf[key] = value # overrides means we have a .ini to read overrides = os.getenv('CLOUDSTACK_OVERRIDES', '').strip() if not overrides and set(env_conf).issuperset(REQUIRED_CONFIG_KEYS): return env_conf ini_conf = read_config_from_ini(ini_group) overrides = {s.lower() for s in re.split(r'\W+', overrides)} config = dict(dict(env_conf, **ini_conf), **{k: v for k, v in env_conf.items() if k in overrides}) missings = REQUIRED_CONFIG_KEYS.difference(config) if missings: raise ValueError("the configuration is missing the following keys: " + ", ".join(missings)) # convert booleans values. bool_keys = ('dangerous_no_tls_verify',) for bool_key in bool_keys: if isinstance(config[bool_key], string_type): try: config[bool_key] = strtobool(config[bool_key]) except ValueError: pass return config
def read_config(ini_group=None)
Read the configuration from the environment, or the config file. It first tries the environment, then overrides those values with the cloudstack.ini file.
3.353273
3.176191
1.055753
if json: contentType = response.headers.get("Content-Type", "") if not contentType.startswith(("application/json", "text/javascript")): if response.status_code == 200: raise CloudStackException( "JSON (application/json) was expected, got {!r}" .format(contentType), response=response) raise CloudStackException( "HTTP {0.status_code} {0.reason}" .format(response), "Make sure endpoint URL {!r} is correct." .format(self.endpoint), response=response) try: data = response.json() except ValueError as e: raise CloudStackException( "HTTP {0.status_code} {0.reason}" .format(response), "{0!s}. Malformed JSON document".format(e), response=response) [key] = data.keys() data = data[key] else: data = response.text if response.status_code != 200: raise CloudStackException( "HTTP {0} response from CloudStack".format( response.status_code), data, response=response) return data
def _response_value(self, response, json=True)
Parses the HTTP response as the CloudStack value. It throws an exception if the server didn't answer with a 200.
2.843827
2.719062
1.045885
failures = 0 total_time = self.job_timeout or 2**30 remaining = timedelta(seconds=total_time) endtime = datetime.now() + remaining while remaining.total_seconds() > 0: timeout = max(min(self.timeout, remaining.total_seconds()), 1) try: kind, params = self._prepare_request('queryAsyncJobResult', jobid=jobid) transform(params) params['signature'] = self._sign(params) req = requests.Request(self.method, self.endpoint, headers=headers, **{kind: params}) prepped = req.prepare() if self.trace: print(prepped.method, prepped.url, file=sys.stderr) if prepped.headers: print(prepped.headers, "\n", file=sys.stderr) if prepped.body: print(prepped.body, file=sys.stderr) else: print(file=sys.stderr) with requests.Session() as session: response = session.send(prepped, timeout=timeout, verify=self.verify, cert=self.cert) j = self._response_value(response, json) if self.trace: print(response.status_code, response.reason, file=sys.stderr) headersTrace = "\n".join( "{}: {}".format(k, v) for k, v in response.headers.items()) print(headersTrace, "\n", file=sys.stderr) print(response.text, "\n", file=sys.stderr) failures = 0 if j['jobstatus'] != PENDING: if j['jobresultcode'] or j['jobstatus'] != SUCCESS: raise CloudStackException("Job failure", response=response) if 'jobresult' not in j: raise CloudStackException("Unknown job result", response=response) return j['jobresult'] except CloudStackException: raise except Exception as e: failures += 1 if failures > 10: raise e time.sleep(self.poll_interval) remaining = endtime - datetime.now() if response: response.status_code = 408 raise CloudStackException("Timeout waiting for async job result", jobid, response=response)
def _jobresult(self, jobid, json=True, headers=None)
Poll the async job result. Meant to be run in a Thread; the result is put within the result list, which is a hack.
2.875048
2.872681
1.000824
# Python2/3 urlencode aren't good enough for this task. params = "&".join( "=".join((key, cs_encode(value))) for key, value in sorted(data.items()) ) digest = hmac.new( self.secret.encode('utf-8'), msg=params.lower().encode('utf-8'), digestmod=hashlib.sha1).digest() return base64.b64encode(digest).decode('utf-8').strip()
def _sign(self, data)
Compute a signature string according to the CloudStack signature method (hmac/sha1).
3.838791
3.627244
1.058322
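The _sign method above follows the standard CloudStack request-signing scheme: build the sorted, URL-encoded query string, lowercase it, HMAC-SHA1 it with the secret key, and Base64-encode the digest. A self-contained sketch of the same steps, assuming the cs_encode helper shown earlier (the key and parameters are made up):

import base64
import hashlib
import hmac

def sign(params, secret):
    # Sorted key=value pairs with cs_encode-encoded values, joined by '&'.
    query = "&".join(
        "=".join((key, cs_encode(value))) for key, value in sorted(params.items())
    )
    # Only the string being signed is lowercased; the request itself keeps the original case.
    digest = hmac.new(
        secret.encode("utf-8"),
        msg=query.lower().encode("utf-8"),
        digestmod=hashlib.sha1,
    ).digest()
    return base64.b64encode(digest).decode("utf-8").strip()

# e.g. sign({"command": "listVirtualMachines", "apikey": "API_KEY"}, "SECRET_KEY")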
output = json.dumps(data, indent=2, sort_keys=True) if pygments and sys.stdout.isatty(): style = get_style_by_name(theme) formatter = Terminal256Formatter(style=style) return pygments.highlight(output, JsonLexer(), formatter) return output
def _format_json(data, theme)
Pretty print a dict as a JSON, with colors if pygments is present.
2.631771
2.197965
1.197367
self.buf.extend(data) while self.buf: if self.state == AWAITING_CONTROL_LINE: msg = MSG_RE.match(self.buf) if msg: try: subject, sid, _, reply, needed_bytes = msg.groups() self.msg_arg["subject"] = subject self.msg_arg["sid"] = int(sid) if reply: self.msg_arg["reply"] = reply else: self.msg_arg["reply"] = b'' self.needed = int(needed_bytes) del self.buf[:msg.end()] self.state = AWAITING_MSG_PAYLOAD continue except: raise ErrProtocol("nats: malformed MSG") ok = OK_RE.match(self.buf) if ok: # Do nothing and just skip. del self.buf[:ok.end()] continue err = ERR_RE.match(self.buf) if err: err_msg = err.groups() yield self.nc._process_err(err_msg) del self.buf[:err.end()] continue ping = PING_RE.match(self.buf) if ping: del self.buf[:ping.end()] yield self.nc._process_ping() continue pong = PONG_RE.match(self.buf) if pong: del self.buf[:pong.end()] yield self.nc._process_pong() continue info = INFO_RE.match(self.buf) if info: info_line = info.groups()[0] self.nc._process_info(info_line) del self.buf[:info.end()] continue # If nothing matched at this point, then probably # a split buffer and need to gather more bytes, # otherwise it would mean that there is an issue # and we're getting malformed control lines. if len(self.buf ) < MAX_CONTROL_LINE_SIZE and _CRLF_ not in self.buf: break else: raise ErrProtocol("nats: unknown protocol") elif self.state == AWAITING_MSG_PAYLOAD: if len(self.buf) >= self.needed + CRLF_SIZE: subject = self.msg_arg["subject"] sid = self.msg_arg["sid"] reply = self.msg_arg["reply"] # Consume msg payload from buffer and set next parser state. payload = bytes(self.buf[:self.needed]) del self.buf[:self.needed + CRLF_SIZE] self.state = AWAITING_CONTROL_LINE yield self.nc._process_msg(sid, subject, reply, payload) else: # Wait until we have enough bytes in buffer. break
def parse(self, data=b'')
Parses the wire protocol from NATS for the client and dispatches the subscription callbacks.
2.809095
2.741507
1.024654
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.setblocking(0) self._socket.settimeout(1.0) if self.options["tcp_nodelay"]: self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.io = tornado.iostream.IOStream(self._socket, max_buffer_size=self._max_read_buffer_size, max_write_buffer_size=self._max_write_buffer_size, read_chunk_size=self._read_chunk_size) # Connect to server with a deadline future = self.io.connect((s.uri.hostname, s.uri.port)) yield tornado.gen.with_timeout( timedelta(seconds=self.options["connect_timeout"]), future) # Called whenever disconnected from the server. self.io.set_close_callback(self._process_op_err)
def _server_connect(self, s)
Sets up a TCP connection to the server.
2.786247
2.730335
1.020478
''' Generates a JSON string with the params to be used when sending CONNECT to the server. ->> CONNECT {"verbose": false, "pedantic": false, "lang": "python2" } ''' options = { "verbose": self.options["verbose"], "pedantic": self.options["pedantic"], "lang": __lang__, "version": __version__, "protocol": PROTOCOL } if "auth_required" in self._server_info: if self._server_info["auth_required"] == True: # In case there is no password, then consider handle # sending a token instead. if self.options["user"] is not None and self.options["password"] is not None: options["user"] = self.options["user"] options["pass"] = self.options["password"] elif self.options["token"] is not None: options["auth_token"] = self.options["token"] elif self._current_server.uri.password is None: options["auth_token"] = self._current_server.uri.username else: options["user"] = self._current_server.uri.username options["pass"] = self._current_server.uri.password if self.options["name"] is not None: options["name"] = self.options["name"] if self.options["no_echo"] is not None: options["echo"] = not self.options["no_echo"] args = json.dumps(options, sort_keys=True) return CONNECT_PROTO.format(CONNECT_OP, args, _CRLF_)
def connect_command(self)
Generates a JSON string with the params to be used when sending CONNECT to the server. ->> CONNECT {"verbose": false, "pedantic": false, "lang": "python2" }
3.481827
2.535881
1.373025
if priority: self._pending.insert(0, cmd) else: self._pending.append(cmd) self._pending_size += len(cmd) if self._pending_size > DEFAULT_PENDING_SIZE: yield self._flush_pending()
def send_command(self, cmd, priority=False)
Flushes a command to the server as a bytes payload.
3.438967
3.195187
1.076296
payload_size = len(payload) if payload_size > self._max_payload_size: raise ErrMaxPayload if self.is_closed: raise ErrConnectionClosed yield self._publish(subject, reply, payload, payload_size) if self._flush_queue.empty(): yield self._flush_pending()
def publish_request(self, subject, reply, payload)
Publishes a message tagging it with a reply subscription which can be used by those receiving the message to respond: ->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5 ->> MSG_PAYLOAD: world <<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
4.017827
4.574978
0.878218
future = tornado.concurrent.Future() yield self._send_ping(future) try: result = yield tornado.gen.with_timeout( timedelta(seconds=timeout), future) except tornado.gen.TimeoutError: # Set the future to False so it can be ignored in _process_pong, # and try to remove from the list of pending pongs. future.set_result(False) for i, pong_future in enumerate(self._pongs): if pong_future == future: del self._pongs[i] break raise raise tornado.gen.Return(result)
def _flush_timeout(self, timeout)
Takes a timeout and sets up a future which will return True once the server responds back, otherwise raises a TimeoutError.
3.187812
2.92968
1.088109
next_inbox = INBOX_PREFIX[:] next_inbox.extend(self._nuid.next()) inbox = str(next_inbox) future = tornado.concurrent.Future() sid = yield self.subscribe( subject=inbox, queue=_EMPTY_, cb=None, future=future, max_msgs=1) yield self.auto_unsubscribe(sid, 1) yield self.publish_request(subject, inbox, payload) msg = yield tornado.gen.with_timeout( timedelta(seconds=timeout), future) raise tornado.gen.Return(msg)
def timed_request(self, subject, payload, timeout=0.5)
Implements the request/response pattern via pub/sub, using an ephemeral subscription published with a limited interest of 1 reply, returning the response or raising a Timeout error. ->> SUB _INBOX.E9jM2HTirMXDMXPROSQmSd 90 ->> UNSUB 90 1 ->> PUB hello _INBOX.E9jM2HTirMXDMXPROSQmSd 5 ->> MSG_PAYLOAD: world <<- MSG hello 2 _INBOX.E9jM2HTirMXDMXPROSQmSd 5
5.73411
5.550018
1.03317
if self.is_closed: raise ErrConnectionClosed if self.is_draining: raise ErrConnectionDraining self._ssid += 1 sid = self._ssid sub = Subscription( subject=subject, queue=queue, cb=cb, future=future, max_msgs=max_msgs, is_async=is_async, sid=sid, ) self._subs[sid] = sub if cb is not None: sub.pending_msgs_limit = pending_msgs_limit sub.pending_bytes_limit = pending_bytes_limit sub.pending_queue = tornado.queues.Queue( maxsize=pending_msgs_limit) @tornado.gen.coroutine def wait_for_msgs(): while True: sub = wait_for_msgs.sub err_cb = wait_for_msgs.err_cb try: sub = wait_for_msgs.sub if sub.closed: break msg = yield sub.pending_queue.get() if msg is None: break sub.received += 1 sub.pending_size -= len(msg.data) if sub.max_msgs > 0 and sub.received >= sub.max_msgs: # If we have hit the max for delivered msgs, remove sub. self._subs.pop(sub.sid, None) self._remove_subscription(sub) # Invoke depending of type of handler. if sub.is_async: # NOTE: Deprecate this usage in a next release, # the handler implementation ought to decide # the concurrency level at which the messages # should be processed. self._loop.spawn_callback(sub.cb, msg) else: yield sub.cb(msg) except Exception as e: # All errors from calling an async subscriber # handler are async errors. if err_cb is not None: yield err_cb(e) # Bind the subscription and error cb if present wait_for_msgs.sub = sub wait_for_msgs.err_cb = self._error_cb self._loop.spawn_callback(wait_for_msgs) elif future is not None: # Used to handle the single response from a request # based on auto unsubscribe. sub.future = future # Send SUB command... sub_cmd = b''.join([ SUB_OP, _SPC_, sub.subject.encode(), _SPC_, sub.queue.encode(), _SPC_, ("%d" % sid).encode(), _CRLF_ ]) yield self.send_command(sub_cmd) yield self._flush_pending() raise tornado.gen.Return(sid)
def subscribe( self, subject="", queue="", cb=None, future=None, max_msgs=0, is_async=False, pending_msgs_limit=DEFAULT_SUB_PENDING_MSGS_LIMIT, pending_bytes_limit=DEFAULT_SUB_PENDING_BYTES_LIMIT, )
Sends a SUB command to the server. Takes a queue parameter which can be used in case of distributed queues or left empty if it is not the case, and a callback to which messages will be dispatched for processing.
3.550442
3.588664
0.989349
kwargs["is_async"] = True sid = yield self.subscribe(subject, **kwargs) raise tornado.gen.Return(sid)
def subscribe_async(self, subject, **kwargs)
Schedules callback from subscription to be processed asynchronously in the next iteration of the loop.
4.30729
4.26413
1.010122
if self.is_closed: raise ErrConnectionClosed sub = None try: sub = self._subs[ssid] except KeyError: # Already unsubscribed. return # In case subscription has already received enough messages # then announce to the server that we are unsubscribing and # remove the callback locally too. if max_msgs == 0 or sub.received >= max_msgs: self._subs.pop(ssid, None) self._remove_subscription(sub) # We will send these for all subs when we reconnect anyway, # so that we can suppress here. if not self.is_reconnecting: yield self.auto_unsubscribe(ssid, max_msgs)
def unsubscribe(self, ssid, max_msgs=0)
Takes a subscription sequence id and removes the subscription from the client, optionally after receiving more than max_msgs, and unsubscribes immediately.
6.426991
6.122768
1.049687
if self.is_draining: raise ErrConnectionDraining yield self._unsubscribe(sid, limit)
def auto_unsubscribe(self, sid, limit=1)
Sends an UNSUB command to the server. Unsubscribe is one of the basic building blocks in order to be able to define request/response semantics via pub/sub by announcing to the server a limited interest a priori.
13.384769
13.199969
1.014
yield self.send_command(PONG_PROTO) if self._flush_queue.empty(): yield self._flush_pending()
def _process_ping(self)
The server will be periodically sending a PING, and if the client does not reply a PONG back a number of times, it will close the connection sending an `-ERR 'Stale Connection'` error.
12.841137
11.921929
1.077102
payload_size = len(data) self.stats['in_msgs'] += 1 self.stats['in_bytes'] += payload_size msg = Msg(subject=subject.decode(), reply=reply.decode(), data=data) # Don't process the message if the subscription has been removed sub = self._subs.get(sid) if sub is None: raise tornado.gen.Return() # Check if it is an old style request. if sub.future is not None: sub.future.set_result(msg) # Discard subscription since done self._subs.pop(sid, None) self._remove_subscription(sub) raise tornado.gen.Return() # Let subscription wait_for_msgs coroutine process the messages, # but in case sending to the subscription task would block, # then consider it to be an slow consumer and drop the message. try: sub.pending_size += payload_size if sub.pending_size >= sub.pending_bytes_limit: # Substract again the bytes since throwing away # the message so would not be pending data. sub.pending_size -= payload_size if self._error_cb is not None: yield self._error_cb(ErrSlowConsumer()) raise tornado.gen.Return() sub.pending_queue.put_nowait(msg) except tornado.queues.QueueFull: if self._error_cb is not None: yield self._error_cb(ErrSlowConsumer())
def _process_msg(self, sid, subject, reply, data)
Dispatches the received message to the stored subscription. It first tries to detect whether the message should be dispatched to a passed callback; if there was no callback, it tries to set the message into a future.
4.566271
4.490996
1.016761
# INFO {...} line = yield self.io.read_until(_CRLF_, max_bytes=None) _, args = line.split(INFO_OP + _SPC_, 1) self._server_info = tornado.escape.json_decode((args)) if 'max_payload' in self._server_info: self._max_payload_size = self._server_info["max_payload"] # Check whether we need to upgrade to TLS first of all if 'tls_required' in self._server_info and self._server_info['tls_required']: # Detach and prepare for upgrading the TLS connection. self._loop.remove_handler(self._socket.fileno()) tls_opts = {} if "tls" in self.options: # Allow customizing the TLS version though default # to one that the server supports at least. tls_opts = self.options["tls"] # Rewrap using a TLS connection, can't do handshake on connect # as the socket is non blocking. self._socket = ssl.wrap_socket( self._socket, do_handshake_on_connect=False, **tls_opts) # Use the TLS stream instead from now self.io = tornado.iostream.SSLIOStream(self._socket) self.io.set_close_callback(self._process_op_err) self.io._do_ssl_handshake() # Refresh state of the parser upon reconnect. if self.is_reconnecting: self._ps.reset() # CONNECT then send a PING expecting a PONG to make a # roundtrip to the server and assert that sent commands sent # this far have been processed already. cmd = self.connect_command() yield self.io.write(cmd) yield self.io.write(PING_PROTO) # FIXME: Add readline timeout for these. next_op = yield self.io.read_until( _CRLF_, max_bytes=MAX_CONTROL_LINE_SIZE) if self.options["verbose"] and OK_OP in next_op: next_op = yield self.io.read_until( _CRLF_, max_bytes=MAX_CONTROL_LINE_SIZE) if ERR_OP in next_op: err_line = next_op.decode() _, err_msg = err_line.split(_SPC_, 1) # FIXME: Maybe handling could be more special here, # checking for ErrAuthorization for example. # yield from self._process_err(err_msg) raise NatsError("nats: " + err_msg.rstrip('\r\n')) if PONG_PROTO in next_op: self._status = Client.CONNECTED self._loop.spawn_callback(self._read_loop) self._pongs = [] self._pings_outstanding = 0 self._ping_timer = tornado.ioloop.PeriodicCallback( self._ping_interval, self.options["ping_interval"] * 1000) self._ping_timer.start() # Queue and flusher for coalescing writes to the server. self._flush_queue = tornado.queues.Queue(maxsize=1024) self._loop.spawn_callback(self._flusher_loop)
def _process_connect_init(self)
Handles the initial part of the NATS protocol, moving from the (RE)CONNECTING to CONNECTED states when establishing a connection with the server.
5.029975
4.877572
1.031246
info = tornado.escape.json_decode(info_line.decode()) if 'connect_urls' in info: if info['connect_urls']: connect_urls = [] for connect_url in info['connect_urls']: uri = urlparse("nats://%s" % connect_url) srv = Srv(uri) srv.discovered = True # Filter for any similar server in the server pool already. should_add = True for s in self._server_pool: if uri.netloc == s.uri.netloc: should_add = False if should_add: connect_urls.append(srv) if self.options["dont_randomize"] is not True: shuffle(connect_urls) for srv in connect_urls: self._server_pool.append(srv)
def _process_info(self, info_line)
Process INFO lines sent by the server to reconfigure the client with the latest updates from the cluster, enabling server discovery.
3.805992
3.575426
1.064486
if self.options["dont_randomize"]: server = self._server_pool.pop(0) self._server_pool.append(server) else: shuffle(self._server_pool) s = None for server in self._server_pool: if self.options["max_reconnect_attempts"] > 0 and ( server.reconnects > self.options["max_reconnect_attempts"]): continue else: s = server return s
def _next_server(self)
Chooses the next available server to connect to.
3.536606
3.317451
1.066061
if self.is_connecting or self.is_closed or self.is_reconnecting: return if self.options["allow_reconnect"] and self.is_connected: self._status = Client.RECONNECTING yield self._attempt_reconnect() else: # Transition into CLOSED state self._status = Client.DISCONNECTED self._err = err yield self._close(Client.CLOSED)
def _process_op_err(self, err=None)
Process errors which occurred while reading/parsing the protocol. It attempts to reconnect if `allow_reconnect' is enabled.
5.714133
5.121501
1.115714
# Continue trying to connect until there is an available server # or bail in case there are no more available servers. while True: if len(self._server_pool) == 0: self._current_server = None raise ErrNoServers now = time.time() s = self._server_pool.pop(0) if self.options["max_reconnect_attempts"] > 0: if s.reconnects > self.options["max_reconnect_attempts"]: # Discard server since already tried to reconnect too many times. continue # Not yet exceeded max_reconnect_attempts so can still use # this server in the future. self._server_pool.append(s) if s.last_attempt is not None and now < s.last_attempt + self.options["reconnect_time_wait"]: # Backoff connecting to server if we attempted recently. yield tornado.gen.sleep(self.options["reconnect_time_wait"]) try: yield self._server_connect(s) self._current_server = s break except Exception as e: s.last_attempt = time.time() s.reconnects += 1 self._err = e if self._error_cb is not None: self._error_cb(e) self._status = Client.RECONNECTING continue
def _select_next_server(self)
Looks up in the server pool for an available server and attempts to connect.
3.860877
3.760585
1.026669
if self.is_closed: self._status = status return self._status = Client.CLOSED # Stop background tasks yield self._end_flusher_loop() if self._ping_timer is not None and self._ping_timer.is_running(): self._ping_timer.stop() if not self.io.closed(): self.io.close() # Cleanup subscriptions since not reconnecting so no need # to replay the subscriptions anymore. for ssid, sub in self._subs.items(): self._subs.pop(ssid, None) self._remove_subscription(sub) if do_callbacks: if self._disconnected_cb is not None: self._disconnected_cb() if self._closed_cb is not None: self._closed_cb()
def _close(self, status, do_callbacks=True)
Takes the status on which it should leave the connection and an optional boolean parameter to dispatch the disconnected and close callbacks if there are any.
4.228616
4.067975
1.039489
if self.is_draining: return if self.is_closed: raise ErrConnectionClosed if self.is_connecting or self.is_reconnecting: raise ErrConnectionReconnecting # Drain a single subscription if sid is not None: raise tornado.gen.Return(self._drain_sub(sid)) # Start draining the subscriptions self._status = Client.DRAINING_SUBS drain_tasks = [] for ssid, sub in self._subs.items(): task = self._drain_sub(ssid) drain_tasks.append(task) # Wait for subscriptions to stop handling messages. drain_is_done = tornado.gen.multi(drain_tasks) try: yield tornado.gen.with_timeout( timedelta(seconds=self.options["drain_timeout"]), drain_is_done, ) except tornado.gen.TimeoutError: if self._error_cb is not None: yield self._error_cb(ErrDrainTimeout()) finally: self._status = Client.DRAINING_PUBS yield self.flush() yield self.close()
def drain(self, sid=None)
Drain will put a connection into a drain state. All subscriptions will immediately be put into a drain state. Upon completion, the publishers will be drained and can not publish any additional messages. Upon draining of the publishers, the connection will be closed. Use the `closed_cb' option to know when the connection has moved from draining to closed. If a sid is passed, just the subscription with that sid will be drained without closing the connection.
3.494812
3.341598
1.045851
self.stats['errors_received'] += 1 if err == "'Authorization Violation'": self._err = ErrAuthorization elif err == "'Slow Consumer'": self._err = ErrSlowConsumer elif err == "'Stale Connection'": self._err = ErrStaleConnection else: self._err = Exception(err) if self._error_cb is not None: self._error_cb(err)
def _process_err(self, err=None)
Stores the last received error from the server and dispatches the error callback.
4.695901
4.084105
1.149799
while True: if not self.is_connected or self.is_connecting or self.io.closed(): break try: yield self.io.read_bytes( DEFAULT_READ_CHUNK_SIZE, streaming_callback=self._ps.parse, partial=True) except tornado.iostream.StreamClosedError as e: self._err = e if self._error_cb is not None and not self.is_reconnecting and not self.is_closed: self._error_cb(e) break
def _read_loop(self, data='')
Read loop for gathering bytes from the server in a buffer of maximum MAX_CONTROL_LINE_SIZE, then received bytes are streamed to the parsing callback for processing.
4.829369
4.449449
1.085386
while True: pending = [] pending_size = 0 try: # Block and wait for the flusher to be kicked yield self._flush_queue.get() # Check whether we should bail first if not self.is_connected or self.is_connecting or self.io.closed(): break # Flush only when we actually have something in buffer... if self._pending_size > 0: cmds = b''.join(self._pending) # Reset pending queue and store tmp in case write fails self._pending, pending = [], self._pending self._pending_size, pending_size = 0, self._pending_size yield self.io.write(cmds) except tornado.iostream.StreamBufferFullError: # Acumulate as pending data size and flush when possible. self._pending = pending + self._pending self._pending_size += pending_size except tornado.iostream.StreamClosedError as e: self._pending = pending + self._pending self._pending_size += pending_size yield self._process_op_err(e)
def _flusher_loop(self)
Coroutine which continuously tries to consume pending commands and then flushes them to the socket.
5.268256
5.003307
1.052955
if not self.is_connected or self.is_connecting or self.io.closed(): if self._flush_queue is not None and self._flush_queue.empty(): self._flush_pending(check_connected=False) yield tornado.gen.moment
def _end_flusher_loop(self)
Let flusher_loop coroutine quit - useful when disconnecting.
7.39145
6.364502
1.161356
key = None # ## debug output # sys.stderr.write("DEBUG: %s to %s\n" %(b,a)) try: if a is None or isinstance(a, (six.string_types, float, six.integer_types)): # border case for first run or if a is a primitive a = b elif isinstance(a, list): # lists can be only appended if isinstance(b, list): # merge lists a.extend(b) else: # append to list a.append(b) elif isinstance(a, dict): # dicts must be merged if isinstance(b, dict): for key in b: if key in a: a[key] = data_merge(a[key], b[key]) else: a[key] = b[key] else: raise YamlReaderError('Cannot merge non-dict "%s" into dict "%s"' % (b, a)) else: raise YamlReaderError('NOT IMPLEMENTED "%s" into "%s"' % (b, a)) except TypeError as e: raise YamlReaderError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a)) return a
def data_merge(a, b)
merges b into a and returns the merged result, based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge and extended to also merge arrays and to replace the content of keys with the same name. NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen
2.836216
2.783201
1.019048
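A short illustration of the merge semantics data_merge implements: lists from b are appended to the matching list in a, nested dicts are merged recursively, and new keys are added (the data values are illustrative only):

a = {"hosts": ["web1"], "limits": {"cpu": 1}}
b = {"hosts": ["web2"], "limits": {"mem": "1G"}, "debug": True}
data_merge(a, b)
# -> {"hosts": ["web1", "web2"], "limits": {"cpu": 1, "mem": "1G"}, "debug": True}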
logger = logging.getLogger(__name__) logger.debug("initialized with source=%s, defaultdata=%s", source, defaultdata) if defaultdata is NO_DEFAULT: data = None else: data = defaultdata files = [] if type(source) is not str and len(source) == 1: # when called from __main source is always a list, even if it contains only one item. # turn into a string if it contains only one item to support our different call modes source = source[0] if type(source) is list or type(source) is tuple: # got a list, assume to be files files = source elif os.path.isdir(source): # got a dir, read all *.yaml files files = sorted(glob.glob(os.path.join(source, "*.yaml"))) elif os.path.isfile(source): # got a single file, turn it into list to use the same code files = [source] else: # try to use the source as a glob files = sorted(glob.glob(source)) if files: logger.debug("Reading %s\n", ", ".join(files)) for yaml_file in files: try: with open(yaml_file) as f: new_data = safe_load(f) logger.debug("YAML LOAD: %s", new_data) except MarkedYAMLError as e: logger.error("YAML Error: %s", e) raise YamlReaderError("YAML Error: %s" % str(e)) if new_data is not None: data = data_merge(data, new_data) else: if defaultdata is NO_DEFAULT: logger.error("No YAML data found in %s and no default data given", source) raise YamlReaderError("No YAML data found in %s" % source) return data
def yaml_load(source, defaultdata=NO_DEFAULT)
merge YAML data from files found in source. Always returns a dict. The YAML files are expected to contain some kind of key:value structures, possibly deeply nested. When merging, lists are appended and dict keys are replaced. The YAML files are read with the yaml.safe_load function. source can be a file, a dir, a list/tuple of files or a string containing a glob expression (with ?*[]). For a directory, all *.yaml files will be read in alphabetical order. defaultdata can be used to initialize the data.
2.890388
2.835058
1.019516
''' Generate a widget visualization using the widget. The export_viz_to_widget method passes the visualization JSON to the instantiated widget, which is returned and visualized on the front-end. ''' if hasattr(self, 'widget_class') == True: self.widget_instance = self.widget_class(network = self.export_viz_to_widget(which_viz)) return self.widget_instance else: print('Can not make widget because Network has no attribute widget_class') print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)')
def widget(self, which_viz='viz')
Generate a widget visualization using the widget. The export_viz_to_widget method passes the visualization JSON to the instantiated widget, which is returned and visualized on the front-end.
8.039989
3.678897
2.185434
curr = datetime.datetime.combine(d1, datetime.time()) end = datetime.datetime.combine(d2, datetime.time()) if d1.date() == d2.date(): yield curr return while curr < end: yield curr curr = curr + datetime.timedelta(days=1)
def iterdays(self, d1, d2)
Date iterator returning dates in d1 <= x < d2
2.270354
2.264678
1.002506
for dt in self.iterdays(d1, d2): if not self.isweekend(dt): yield dt
def iterweekdays(self, d1, d2)
Date iterator returning dates in d1 <= x < d2, excluding weekends
3.218226
2.637219
1.22031
assert d2 >= d1 if d1.date() == d2.date() and d2.time() < self.business_hours[0]: return first = True for dt in self.iterdays(d1, d2): if first and d1.time() > self.business_hours[1]: first = False continue first = False if not self.isweekend(dt) and not self.isholiday(dt): yield dt
def iterbusinessdays(self, d1, d2)
Date iterator returning dates in d1 <= x < d2, excluding weekends and holidays
3.203577
3.09291
1.035781
if d1 > d2: d1, d2, timedelta_direction = d2, d1, -1 else: timedelta_direction = 1 businessdays = self._build_spanning_datetimes(d1, d2) time = datetime.timedelta() if len(businessdays) == 0: # HACK: manually handle the case when d1 is after business hours while d2 is during if self.isduringbusinesshours(d2): time += d2 - datetime.datetime.combine(d2, self.business_hours[0]) # HACK: manually handle the case where d1 is on an earlier non-business day and d2 is after hours on a business day elif not self.isbusinessday(d1) and self.isbusinessday(d2): if d2.time() > self.business_hours[1]: time += datetime.datetime.combine( d2, self.business_hours[1]) - datetime.datetime.combine( d2, self.business_hours[0]) elif d2.time() > self.business_hours[0]: time += d2 - datetime.datetime.combine( d2, self.business_hours[0]) else: prev = None current = None count = 0 for d in businessdays: if current is None: current = d current = datetime.datetime.combine(d, current.time()) if prev is not None: if prev.date() != current.date(): time += datetime.timedelta(days=1) if count == len(businessdays) - 1: if current > d: # We went too far time -= datetime.timedelta(days=1) time += self.open_hours - (current - d) else: time += d - current count += 1 prev = current return time * timedelta_direction
def businesstimedelta(self, d1, d2)
Returns a datetime.timedelta with the number of full business days and business time between d1 and d2
2.99397
2.94821
1.015521
open_hours = self.open_hours.seconds / 3600 btd = self.businesstimedelta(d1, d2) btd_hours = btd.seconds / 3600 return datetime.timedelta(hours=(btd.days * open_hours + btd_hours))
def businesstime_hours(self, d1, d2)
Returns a datetime.timedelta of business hours between d1 and d2, based on the length of the business day
3.537667
3.408042
1.038035
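The arithmetic in businesstime_hours converts the day-granular business timedelta into hours by multiplying whole business days by the length of the business day. A worked example, assuming 9:00-17:00 business hours (open_hours of 8) and an assumed businesstimedelta result of 2 days and 3 hours:

import datetime

btd = datetime.timedelta(days=2, hours=3)           # assumed businesstimedelta(d1, d2) result
open_hours = 8                                       # 9:00-17:00 business day
hours = btd.days * open_hours + btd.seconds / 3600
datetime.timedelta(hours=hours)                      # -> 19:00:00 (2 * 8 + 3 hours)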
if dt.weekday() == 4: sat = dt + datetime.timedelta(days=1) if super(USFederalHolidays, self)._day_rule_matches(rule, sat): return True elif dt.weekday() == 0: sun = dt - datetime.timedelta(days=1) if super(USFederalHolidays, self)._day_rule_matches(rule, sun): return True return super(USFederalHolidays, self)._day_rule_matches(rule, dt)
def _day_rule_matches(self, rule, dt)
Day-of-month-specific US federal holidays that fall on Sat or Sun are observed on Fri or Mon respectively. Note that this method considers both the actual holiday and the day of observance to be holidays.
2.064313
1.830666
1.12763
def act_on_cloned_repo(self, path: Union[str, pathlib.Path], api) -> Optional[HookResult]
Do something with a cloned repo. Args: path: Path to the repo. api: An instance of :py:class:`repobee.github_api.GitHubAPI`. Returns: optionally returns a HookResult namedtuple for reporting the outcome of the hook. May also return None, in which case no reporting will be performed for the hook.
41,678.753906
269,053
0.154909
def generate_review_allocations( self, master_repo_name: str, students: Iterable[str], num_reviews: int, review_team_name_function: Callable[[str, str], str] ) -> Mapping[str, List[str]]
Generate a (peer_review_team -> reviewers) mapping for each student repository (i.e. <student>-<master_repo_name>), where len(reviewers) = num_reviews. review_team_name_function should be used to generate review team names. It should be called like: .. code-block:: python review_team_name_function(master_repo_name, student) .. important:: There must be strictly more students than reviewers per repo (`num_reviews`). Otherwise, allocation is impossible. Args: master_repo_name: Name of a master repository. students: Students for which to generate peer review allocations. num_reviews: Amount of reviews each student should perform (and consequently amount of reviewers per repo) review_team_name_function: A function that takes a master repo name as its first argument, and a student username as its second, and returns a review team name. Returns: a (peer_review_team -> reviewers) mapping for each student repository.
59,379.992188
8,489.966797
6.994137
hroot = self.root / hashroot if not hroot.is_dir(): hroot.mkdir() hfile = hroot / gethashfile(key) d = self.get(hfile, {}) d.update( {key : value}) self[hfile] = d
def hset(self, hashroot, key, value)
hashed set
4.013617
4.235768
0.947554
hroot = self.root / hashroot hfile = hroot / gethashfile(key) d = self.get(hfile, _sentinel ) #print "got dict",d,"from",hfile if d is _sentinel: if fast_only: if default is _sentinel: raise KeyError(key) return default # slow mode ok, works even after hcompress() d = self.hdict(hashroot) return d.get(key, default)
def hget(self, hashroot, key, default = _sentinel, fast_only = True)
hashed get
6.60429
6.811879
0.969526
hfiles = self.keys(hashroot + "/*") all = {} for f in hfiles: # print "using",f all.update(self[f]) self.uncache(f) self[hashroot + '/xx'] = all for f in hfiles: p = self.root / f if p.name == 'xx': continue p.unlink()
def hcompress(self, hashroot)
Compress category 'hashroot', so hset is fast again. hget will fail if fast_only is True for compressed items (that were hset before hcompress).
6.206652
6.116821
1.014686
if globpat is None: files = self.root.rglob('*') else: files = self.root.glob(globpat) return [self._normalized(p) for p in files if p.is_file()]
def keys(self, globpat = None)
All keys in DB, or all keys matching a glob
3.449336
3.623309
0.951985
if not items: self.cache = {} for it in items: self.cache.pop(it,None)
def uncache(self,*items)
Removes all, or the specified, items from the cache. Use this after reading a large amount of large objects to free up memory, when you won't be needing the objects for a while.
3.221178
3.674418
0.87665
wtimes = [0.2] * 3 + [0.5] * 2 + [1] tries = 0 waited = 0 while 1: try: val = self[key] return val except KeyError: pass if waited > maxwaittime: raise KeyError(key) time.sleep(wtimes[tries]) waited+=wtimes[tries] if tries < len(wtimes) -1: tries+=1
def waitget(self,key, maxwaittime = 60 )
Wait (poll) for a key to get a value. Will wait for `maxwaittime` seconds before raising a KeyError. The call exits normally if the `key` field in db gets a value within the timeout period. Use this for synchronizing different processes or for ensuring that an unfortunately timed "db['key'] = newvalue" operation in another process (which causes all 'get' operations to raise a KeyError for the duration of pickling) won't screw up your program logic.
2.965458
2.87178
1.03262
# TODO(jogo): make the following doctests pass: # H101: #TODO(jogo fail # H101: #TODO(jogo # TODO(jogo): make this check docstrings as well (don't have to be at top # of function) for token_type, text, start_index, _, _ in tokens: if token_type == tokenize.COMMENT: pos = text.find('TODO') pos1 = text.find('TODO(') if (pos != pos1): return pos + start_index[1], "H101: Use TODO(NAME)"
def hacking_todo_format(physical_line, tokens)
Check for 'TODO()'. OpenStack HACKING guide recommendation for TODO: Include your name with TODOs as in "# TODO(termie)" Okay: # TODO(sdague) H101: # TODO fail H101: # TODO H101: # TODO (jogo) fail Okay: TODO = 5
7.413051
5.827894
1.271995
# don't work about init files for now # TODO(sdague): enforce license in init file if it's not empty of content license_found = False # skip files that are < 10 lines, which isn't enough for a license to fit # this allows us to handle empty files, as well as not fail on the Okay # doctests. if line_number is 1 and len(lines) > 10 and _project_is_apache(): for idx, line in enumerate(lines): # if it's more than 10 characters in, it's probably not in the # header if 0 <= line.find('Licensed under the Apache License') < 10: license_found = True if 0 <= line.find('SPDX-License-Identifier:') < 10: license_found = True if not license_found: return (0, "H102: Apache 2.0 license header not found")
def hacking_has_license(physical_line, filename, lines, line_number)
Check for Apache 2.0 license. H102 license header not found
7.14204
6.685892
1.068225
# don't work about init files for now # TODO(sdague): enforce license in init file if it's not empty of content # skip files that are < 10 lines, which isn't enough for a license to fit # this allows us to handle empty files, as well as not fail on the Okay # doctests. if line_number is 1 and len(lines) > 10 and _project_is_apache(): for idx, line in enumerate(lines): column = line.find('Licensed under the Apache License') if (0 < column < 10 and not _check_for_exact_apache(idx, lines)): if (line.find('SPDX-License-Identifier: Apache-2.0') <= 0): return (column, "H103: Header does not match Apache 2.0 " "License notice")
def hacking_has_correct_license(physical_line, filename, lines, line_number)
Check for Apache 2.0 license. H103 header does not match Apache 2.0 License notice
10.182498
8.485791
1.199947
if line_number == 1 and all(map(EMPTY_LINE_RE.match, lines)): return (0, "H104: File contains nothing but comments")
def hacking_has_only_comments(physical_line, filename, lines, line_number)
Check for empty files with only comments H104 empty file with only comments
7.712516
5.480848
1.407176
global _is_apache_cache if _is_apache_cache is not None: return _is_apache_cache license_files = ["LICENSE"] for filename in license_files: try: with open(filename, "r") as file: for line in file: if re.search('Apache License', line): _is_apache_cache = True return True except IOError: pass _is_apache_cache = False return False
def _project_is_apache()
Determine if a project is Apache. Look for a key string in a set of possible license files to figure out if a project looks to be Apache. This is used as a precondition for enforcing license headers.
2.593203
2.213563
1.171506
APACHE2 = # out of all the formatting I've seen, a 12 line version seems to be the # longest in the source tree. So just take the 12 lines starting with where # the Apache starting words were found, strip all the '#' and collapse the # spaces. content = ''.join(lines[start:(start + 12)]) content = re.sub('\#', '', content) content = re.sub('\s+', ' ', content).strip() stripped_apache2 = re.sub('\s+', ' ', APACHE2).strip() if stripped_apache2 in content: return True else: print("<license>!=<apache2>:\n'%s' !=\n'%s'" % (content, stripped_apache2)) return False
def _check_for_exact_apache(start, lines)
Check for the Apache 2.0 license header. We strip all the newlines and extra spaces so this license string should work regardless of indentation in the file.
7.848392
7.846868
1.000194
for regex in AUTHOR_TAG_RE: if regex.match(physical_line): physical_line = physical_line.lower() pos = physical_line.find('moduleauthor') if pos < 0: pos = physical_line.find('author') return (pos, "H105: Don't use author tags")
def hacking_no_author_tags(physical_line)
Check that no author tags are used. H105 don't use author tags
4.330856
3.684425
1.175449
if noqa: return def is_old_style_except(logical_line): return (',' in logical_line and ')' not in logical_line.rpartition(',')[2]) if (logical_line.startswith("except ") and logical_line.endswith(':') and is_old_style_except(logical_line)): yield 0, "H231: Python 3.x incompatible 'except x,y:' construct"
def hacking_python3x_except_compatible(logical_line, noqa)
r"""Check for except statements to be Python 3.x compatible As of Python 3.x, the construct 'except x,y:' has been removed. Use 'except x as y:' instead. Okay: try:\n pass\nexcept Exception:\n pass Okay: try:\n pass\nexcept (Exception, AttributeError):\n pass H231: try:\n pass\nexcept AttributeError, e:\n pass Okay: try:\n pass\nexcept AttributeError, e: # noqa\n pass
4.459384
4.307719
1.035208
if noqa: return for token_type, text, _, _, _ in tokens: if token_type == tokenize.NUMBER: match = RE_OCTAL.match(text) if match: yield 0, ("H232: Python 3.x incompatible octal %s should be " "written as 0o%s " % (match.group(0)[1:], match.group(1)))
def hacking_python3x_octal_literals(logical_line, tokens, noqa)
r"""Check for octal literals in Python 3.x compatible form. As of Python 3.x, the construct "0755" has been removed. Use "0o755" instead". Okay: f(0o755) Okay: 'f(0755)' Okay: f(755) Okay: f(0) Okay: f(000) Okay: MiB = 1.0415 H232: f(0755) Okay: f(0755) # noqa
4.989726
5.254316
0.949643
if noqa: return for match in RE_PRINT.finditer(logical_line): yield match.start(0), ( "H233: Python 3.x incompatible use of print operator")
def hacking_python3x_print_function(logical_line, noqa)
r"""Check that all print occurrences look like print functions. Check that all occurrences of print look like functions, not print operator. As of Python 3.x, the print operator has been removed. Okay: print(msg) Okay: print (msg) Okay: print msg # noqa Okay: print() H233: print msg H233: print >>sys.stderr, "hello" H233: print msg, H233: print
9.97004
9.380337
1.062866
if noqa: return for token_type, text, start_index, _, _ in tokens: if token_type == tokenize.NAME: if text == "assertEquals" or text == "assertNotEquals": yield (start_index[1], "H234: %s is deprecated, use %s" % (text, text[:-1]))
def hacking_no_assert_equals(logical_line, tokens, noqa)
r"""assert(Not)Equals() is deprecated, use assert(Not)Equal instead. Okay: self.assertEqual(0, 0) Okay: self.assertNotEqual(0, 1) H234: self.assertEquals(0, 0) H234: self.assertNotEquals(0, 1) Okay: self.assertEquals(0, 0) # noqa Okay: self.assertNotEquals(0, 1) # noqa
4.630324
4.26819
1.084845
if noqa: return split_line = logical_line.split() if(len(split_line) > 2 and split_line[0] == '__metaclass__' and split_line[1] == '='): yield (logical_line.find('__metaclass__'), "H236: Python 3.x incompatible __metaclass__, " "use six.add_metaclass()")
def hacking_python3x_metaclass(logical_line, noqa)
r"""Check for metaclass to be Python 3.x compatible. Okay: @six.add_metaclass(Meta)\nclass Foo(object):\n pass Okay: @six.with_metaclass(Meta)\nclass Foo(object):\n pass Okay: class Foo(object):\n '''docstring\n\n __metaclass__ = Meta\n''' H236: class Foo(object):\n __metaclass__ = Meta H236: class Foo(object):\n foo=bar\n __metaclass__ = Meta H236: class Foo(object):\n '''docstr.'''\n __metaclass__ = Meta H236: class Foo(object):\n __metaclass__ = \\\n Meta Okay: class Foo(object):\n __metaclass__ = Meta # noqa
4.497555
3.992972
1.126368
if noqa:
    return
line = core.import_normalize(logical_line.strip())
if line and line.split()[0] == 'import':
    module_name = line.split()[1].split('.')[0]
    if module_name in removed_modules:
        yield 0, ("H237: module %s is "
                  "removed in Python 3" % module_name)
def hacking_no_removed_module(logical_line, noqa)
r"""Check for removed modules in Python 3. Examples: Okay: from os import path Okay: from os import path as p Okay: from os import (path as p) Okay: import os.path H237: import thread Okay: import thread # noqa H237: import commands H237: import md5 as std_md5
5.073585
5.194854
0.976656
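The removed_modules collection is defined outside this entry. A self-contained sketch with a few well-known removed modules as stand-ins, and core.import_normalize approximated by strip(), behaves like this:

# Stand-in for removed_modules; the real list is maintained elsewhere.
removed_modules = {'thread', 'commands', 'md5', 'StringIO'}

def check_removed_module(logical_line):
    line = logical_line.strip()
    if line and line.split()[0] == 'import':
        module_name = line.split()[1].split('.')[0]
        if module_name in removed_modules:
            return "H237: module %s is removed in Python 3" % module_name

print(check_removed_module("import thread"))   # H237 reported
print(check_removed_module("import os.path"))  # None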
if noqa:
    return
line = core.import_normalize(logical_line.strip())
if line.startswith("class ") and not RE_NEW_STYLE_CLASS.match(line):
    yield (0, "H238: old style class declaration, "
           "use new style (inherit from `object`)")
def hacking_no_old_style_class(logical_line, noqa)
r"""Check for old style classes. Examples: Okay: class Foo(object):\n pass Okay: class Foo(Bar, Baz):\n pass Okay: class Foo(object, Baz):\n pass Okay: class Foo(somefunc()):\n pass H238: class Bar:\n pass H238: class Bar():\n pass
9.37503
9.823656
0.954332
if ((line_number <= 5 or line_number > len(lines) - 5) and
        vim_header_re.match(physical_line)):
    return 0, "H106: Don't put vim configuration in source files"
def no_vim_headers(physical_line, line_number, lines)
r"""Check for vim editor configuration in source files. By default vim modelines can only appear in the first or last 5 lines of a source file. Examples: H106: # vim: set tabstop=4 shiftwidth=4\n#\n#\n#\n#\n# H106: # Lic\n# vim: set tabstop=4 shiftwidth=4\n#\n#\n#\n#\n# H106: # Lic\n#\n#\n#\n#\n#\n#\n#\n#\n# vim: set tabstop=4 shiftwidth=4 Okay: # Lic\n#\n#\n#\n#\n#\n#\n# Okay: # viminal hill is located in Rome Okay: # vim, ze nemluvis cesky
8.207575
5.988097
1.370648
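vim_header_re is not shown in this entry. A self-contained sketch with an assumed "# vim:" modeline pattern shows how the first/last-five-lines restriction works:

import re

# Stand-in for vim_header_re (assumed): a "# vim:" modeline comment.
vim_header_re = re.compile(r"^\s*#\s*vim:")

def check_vim_header(physical_line, line_number, lines):
    # Only the first and last five lines of the file are inspected.
    if ((line_number <= 5 or line_number > len(lines) - 5) and
            vim_header_re.match(physical_line)):
        return 0, "H106: Don't put vim configuration in source files"

lines = ["# vim: set tabstop=4 shiftwidth=4\n"] + ["#\n"] * 9
print(check_vim_header(lines[0], 1, lines))  # (0, "H106: ...")
print(check_vim_header("#\n", 6, lines))     # None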
# TODO(jogo): make the following doctests pass:
#            H301: import os, sys

# TODO(mordred: We need to split this into different checks so that they
# can be disabled by command line switches properly

if noqa:
    return

split_line = logical_line.split()
split_line_len = len(split_line)
if (split_line_len > 1 and split_line[0] in ('import', 'from') and
        not core.is_import_exception(split_line[1])):
    pos = logical_line.find(',')
    if pos != -1:
        if split_line[0] == 'from':
            yield pos, "H301: one import per line"
    pos = logical_line.find('*')
    if pos != -1:
        yield pos, "H303: No wildcard (*) import."
        return
    if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
        if 'from' == split_line[0] and split_line_len > 3:
            mod = '.'.join((split_line[1], split_line[3]))
            if core.is_import_exception(mod):
                return
        if RE_RELATIVE_IMPORT.search(logical_line):
            yield logical_line.find('.'), (
                "H304: No relative imports. '%s' is a relative import"
                % logical_line)
            return
def hacking_import_rules(logical_line, filename, noqa)
r"""Check for imports. OpenStack HACKING guide recommends one import per line: Do not import more than one module per line Examples: Okay: from nova.compute import api H301: from nova.compute import api, utils Do not use wildcard import Do not make relative imports Examples: Okay: from os import path Okay: from os import path as p Okay: from os import (path as p) Okay: import os.path Okay: from nova.compute import rpcapi Okay: from six.moves.urllib import parse H303: from os.path import * H304: from .compute import rpcapi
4.189359
4.105834
1.020343
# handle import x
# use .lower since capitalization shouldn't dictate order
if blank_before < 1 and indent_level == previous_indent_level:
    split_line = core.import_normalize(
        logical_line.strip()).lower().split()
    split_previous = core.import_normalize(
        previous_logical.strip()).lower().split()
    length = [2, 4]
    if (len(split_line) in length and len(split_previous) in length and
            split_line[0] == "import" and split_previous[0] == "import"):
        if split_line[1] < split_previous[1]:
            yield (0, "H306: imports not in alphabetical order (%s, %s)"
                   % (split_previous[1], split_line[1]))
def hacking_import_alphabetical(logical_line, blank_before, previous_logical, indent_level, previous_indent_level)
r"""Check for imports in alphabetical order. OpenStack HACKING guide recommendation for imports: imports in human alphabetical order Okay: import os\nimport sys\n\nimport nova\nfrom nova import test Okay: import os\nimport sys H306: import sys\nimport os Okay: import sys\n\n# foo\nimport six
4.690649
4.91291
0.95476
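A minimal, self-contained sketch of the ordering comparison behind H306, restricted to the plain "import x" form and with core.import_normalize approximated by strip():

def out_of_order(previous_logical, logical_line):
    # Case-insensitive comparison of two consecutive "import x" lines.
    prev = previous_logical.strip().lower().split()
    curr = logical_line.strip().lower().split()
    if len(prev) == 2 and len(curr) == 2 and prev[0] == curr[0] == "import":
        return curr[1] < prev[1]
    return False

print(out_of_order("import sys", "import os"))  # True  -> H306
print(out_of_order("import os", "import sys"))  # False -> okay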
return (mod in IMPORT_EXCEPTIONS or any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
def is_import_exception(mod)
Check module name to see if import has been whitelisted. Import based rules should not run on any whitelisted module
4.486409
5.331714
0.841457
if self.name and self.name not in self.__class__._has_run:
    self.__class__._has_run.add(self.name)
    ret = self.run_once()
    if ret is not None:
        yield ret
def run(self)
Make run a no-op if run() has been called before. Store in a global registry the list of checks we've run. If we have run that one before, just skip doing anything the subsequent times. This way, since pycodestyle is file/line based, we don't wind up re-running a check on a git commit message over and over again.
4.005771
3.512854
1.140318
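A minimal sketch of the once-per-name pattern this run() implements, using a hypothetical class (the real base class and registry live in the hacking source):

class OnceCheck(object):
    # Global registry of check names that have already reported.
    _has_run = set()

    def __init__(self, name):
        self.name = name

    def run_once(self):
        return 0, "only reported the first time"

    def run(self):
        if self.name and self.name not in self.__class__._has_run:
            self.__class__._has_run.add(self.name)
            ret = self.run_once()
            if ret is not None:
                yield ret

print(list(OnceCheck("H000").run()))  # [(0, 'only reported the first time')]
print(list(OnceCheck("H000").run()))  # [] on every subsequent run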
if noqa:
    return
for_formatting = False
for token_type, text, start, _, _ in tokens:
    if text == "%" and token_type == tokenize.OP:
        for_formatting = True
    if for_formatting and token_type == tokenize.NAME:
        for k, v in LOCALS_TEXT_MAP.items():
            if text == k and v in logical_line:
                yield (start[1],
                       "H501: Do not use %s for string formatting" % v)
def hacking_no_locals(logical_line, tokens, noqa)
Do not use locals() or self.__dict__ for string formatting. Okay: 'locals()' Okay: 'locals' Okay: locals() Okay: print(locals()) H501: print("%(something)" % locals()) H501: LOG.info(_("%(something)") % self.__dict__) Okay: print("%(something)" % locals()) # noqa
4.788245
4.495591
1.065098
docstring = is_docstring(tokens, previous_logical)
if docstring:
    start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
    if docstring[len(start_triple)] == ' ':
        # docstrings get tokenized on the last line of the docstring, so
        # we don't know the exact position.
        return (0, "H401: docstring should not start with"
                " a space")
def hacking_docstring_start_space(physical_line, previous_logical, tokens)
r"""Check for docstring not starting with space. OpenStack HACKING guide recommendation for docstring: Docstring should not start with space Okay: def foo():\n '''This is good.''' Okay: def foo():\n r'''This is good.''' Okay: def foo():\n a = ''' This is not a docstring.''' Okay: def foo():\n pass\n ''' This is not.''' H401: def foo():\n ''' This is not.''' H401: def foo():\n r''' This is not.'''
8.317098
7.471234
1.113216
docstring = is_docstring(tokens, previous_logical)
if docstring:
    if '\n' not in docstring:
        # not a multi line
        return
    else:
        last_line = docstring.split('\n')[-1]
        pos = max(last_line.rfind(i) for i in END_DOCSTRING_TRIPLE)
        if len(last_line[:pos].strip()) > 0:
            # Something before the end docstring triple
            return (pos,
                    "H403: multi line docstrings should end on a new line")
def hacking_docstring_multiline_end(physical_line, previous_logical, tokens)
r"""Check multi line docstring end. OpenStack HACKING guide recommendation for docstring: Docstring should end on a new line Okay: '''foobar\nfoo\nbar\n''' Okay: def foo():\n '''foobar\n\nfoo\nbar\n''' Okay: class Foo(object):\n '''foobar\n\nfoo\nbar\n''' Okay: def foo():\n a = '''not\na\ndocstring''' Okay: def foo():\n a = '''not\na\ndocstring''' # blah Okay: def foo():\n pass\n'''foobar\nfoo\nbar\n d''' H403: def foo():\n '''foobar\nfoo\nbar\ndocstring''' H403: def foo():\n '''foobar\nfoo\nbar\npretend raw: r''' H403: class Foo(object):\n '''foobar\nfoo\nbar\ndocstring'''\n\n
6.180448
4.947543
1.249196
docstring = is_docstring(tokens, previous_logical)
if docstring:
    if '\n' not in docstring:
        # single line docstring
        return
    start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
    lines = docstring.split('\n')
    if lines[0].strip() == start_triple:
        # docstrings get tokenized on the last line of the docstring, so
        # we don't know the exact position.
        return (0, "H404: multi line docstring "
                "should start without a leading new line")
def hacking_docstring_multiline_start(physical_line, previous_logical, tokens)
r"""Check multi line docstring starts immediately with summary. OpenStack HACKING guide recommendation for docstring: Docstring should start with a one-line summary, less than 80 characters. Okay: '''foobar\n\nfoo\nbar\n''' Okay: def foo():\n a = '''\nnot\na docstring\n''' H404: def foo():\n '''\nfoo\nbar\n'''\n\n H404: def foo():\n r'''\nfoo\nbar\n'''\n\n
7.462312
6.611871
1.128623
docstring = is_docstring(tokens, previous_logical)
if docstring:
    if '\n' not in docstring:
        # not a multi line docstring
        return
    lines = docstring.split('\n')
    if len(lines) > 1 and len(lines[1].strip()) != 0:
        # docstrings get tokenized on the last line of the docstring, so
        # we don't know the exact position.
        return (0, "H405: multi line docstring "
                "summary not separated with an empty line")
def hacking_docstring_summary(physical_line, previous_logical, tokens)
r"""Check multi line docstring summary is separated with empty line. OpenStack HACKING guide recommendation for docstring: Docstring should start with a one-line summary, less than 80 characters. Okay: def foo():\n a = '''\nnot\na docstring\n''' Okay: '''foobar\n\nfoo\nbar\n''' H405: def foo():\n '''foobar\nfoo\nbar\n''' H405: def foo():\n r'''foobar\nfoo\nbar\n''' H405: def foo():\n '''foobar\n'''
6.455844
5.207875
1.239631
for token_type, text, start, _, _ in tokens:
    if token_type == tokenize.STRING:
        break
    elif token_type != tokenize.INDENT:
        return False
else:
    return False
line = text.lstrip()
start, start_triple = _find_first_of(line, START_DOCSTRING_TRIPLE)
if (previous_logical.startswith("def ") or
        previous_logical.startswith("class ")):
    if start == 0:
        return text
def is_docstring(tokens, previous_logical)
Return found docstring 'A docstring is a string literal that occurs as the first statement in a module, function, class,' http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
4.100005
3.766293
1.088605
starts = ((line.find(i), i) for i in substrings)
found = [(i, sub) for i, sub in starts if i != -1]
if found:
    return min(found)
else:
    return -1, None
def _find_first_of(line, substrings)
Find earliest occurrence of one of substrings in line. Returns pair of index and found substring, or (-1, None) if no occurrences of any of substrings were found in line.
3.438584
3.141348
1.09462
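The helper above as it would be used by the docstring checks. START_DOCSTRING_TRIPLE is not shown in this entry, so a reduced stand-in is used here:

# Stand-in for START_DOCSTRING_TRIPLE (the real list also covers r/u prefixes).
START_DOCSTRING_TRIPLE = ['"""', "'''"]

def _find_first_of(line, substrings):
    # Earliest occurrence of any substring, as (index, substring).
    starts = ((line.find(i), i) for i in substrings)
    found = [(i, sub) for i, sub in starts if i != -1]
    if found:
        return min(found)
    return -1, None

print(_find_first_of('r"""Summary."""', START_DOCSTRING_TRIPLE))  # (1, '"""')
print(_find_first_of('x = 1', START_DOCSTRING_TRIPLE))            # (-1, None)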
while True:
    try:
        token_type, text, _, _, line = yield
    except GeneratorExit:
        return

    if text == "def" and token_type == tokenize.NAME:
        # explicitly ignore function definitions, as oslo defines these
        return
    if (token_type == tokenize.NAME and
            text in ["_", "_LI", "_LW", "_LE", "_LC"]):

        while True:
            token_type, text, start, _, _ = yield
            if token_type != tokenize.NL:
                break
        if token_type != tokenize.OP or text != "(":
            continue  # not a localization call

        format_string = ''
        while True:
            token_type, text, start, _, _ = yield
            if token_type == tokenize.STRING:
                format_string += eval(text)
            elif token_type == tokenize.NL:
                pass
            else:
                break

        if not format_string:
            raise LocalizationError(
                start, "H701: Empty localization string")
        if token_type != tokenize.OP:
            raise LocalizationError(
                start, "H701: Invalid localization call")
        if text != ")":
            if text == "%":
                raise LocalizationError(
                    start,
                    "H702: Formatting operation should be outside"
                    " of localization method call")
            elif text == "+":
                raise LocalizationError(
                    start,
                    "H702: Use bare string concatenation instead of +")
            else:
                raise LocalizationError(
                    start, "H702: Argument to _, _LI, _LW, _LC, or _LE "
                    "must be just a string")

        format_specs = FORMAT_RE.findall(format_string)
        positional_specs = [(key, spec) for key, spec in format_specs
                            if not key and spec]
        # not spec means %%, key means %(smth)s
        if len(positional_specs) > 1:
            raise LocalizationError(
                start, "H703: Multiple positional placeholders")
def check_i18n()
Generator that checks token stream for localization errors. Expects tokens to be ``send``ed one by one. Raises LocalizationError if some error is found.
4.254859
4.068903
1.045702
if noqa:
    return

gen = check_i18n()
next(gen)
try:
    list(map(gen.send, tokens))
    gen.close()
except LocalizationError as e:
    yield e.args
def hacking_localization_strings(logical_line, tokens, noqa)
r"""Check localization in line. Okay: _("This is fine") Okay: _LI("This is fine") Okay: _LW("This is fine") Okay: _LE("This is fine") Okay: _LC("This is fine") Okay: _("This is also fine %s") Okay: _("So is this %s, %(foo)s") % {foo: 'foo'} H701: _('') Okay: def _(msg):\n pass Okay: def _LE(msg):\n pass H701: _LI('') H701: _LW('') H701: _LE('') H701: _LC('') Okay: _('') # noqa H702: _("Bob" + " foo") H702: _LI("Bob" + " foo") H702: _LW("Bob" + " foo") H702: _LE("Bob" + " foo") H702: _LC("Bob" + " foo") Okay: _("Bob" + " foo") # noqa H702: _("Bob %s" % foo) H702: _LI("Bob %s" % foo) H702: _LW("Bob %s" % foo) H702: _LE("Bob %s" % foo) H702: _LC("Bob %s" % foo) H702: _("%s %s" % (foo, bar)) H703: _("%s %s") % (foo, bar)
8.161616
12.915499
0.631924
'''Check whether an AST node corresponds to None.

In Python 2 None uses the same ast.Name class that variables etc. use,
but in Python 3 there is a new ast.NameConstant class.
'''
if PY2:
    return isinstance(node, ast.Name) and node.id == 'None'
return isinstance(node, ast.NameConstant) and node.value is None
def is_none(node)
Check whether an AST node corresponds to None. In Python 2 None uses the same ast.Name class that variables etc. use, but in Python 3 there is a new ast.NameConstant class.
4.653939
1.78632
2.605323
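A self-contained usage sketch of is_none() against a parsed assertion line, mirroring the H203 check that follows:

import ast
import sys

PY2 = sys.version_info[0] == 2

def is_none(node):
    if PY2:
        return isinstance(node, ast.Name) and node.id == 'None'
    return isinstance(node, ast.NameConstant) and node.value is None

# The first positional argument of assertEqual(None, 'foo') is a None node.
call = ast.parse("self.assertEqual(None, 'foo')").body[0].value
print([is_none(arg) for arg in call.args])  # [True, False]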
if noqa:
    return
for func_name in ('assertEqual', 'assertIs', 'assertNotEqual',
                  'assertIsNot'):
    try:
        start = logical_line.index('.%s(' % func_name) + 1
    except ValueError:
        continue
    checker = NoneArgChecker(func_name)
    checker.visit(ast.parse(logical_line))
    if checker.none_found:
        yield start, "H203: Use assertIs(Not)None to check for None"
def hacking_assert_is_none(logical_line, noqa)
Use assertIs(Not)None to check for None in assertions. Okay: self.assertEqual('foo', 'bar') Okay: self.assertNotEqual('foo', {}.get('bar', None)) Okay: self.assertIs('foo', 'bar') Okay: self.assertIsNot('foo', 'bar', None) Okay: foo(self.assertIsNot('foo', 'bar')) H203: self.assertEqual(None, 'foo') H203: self.assertNotEqual('foo', None) H203: self.assertIs(None, 'foo', 'bar') H203: self.assertIsNot('foo', None, 'bar') H203: foo(self.assertIsNot('foo', None, 'bar')) Okay: self.assertEqual(None, 'foo') # noqa Okay: self.assertIs(None, 'foo') # noqa Okay: self.assertIsNone('foo')
5.131841
4.848706
1.058394
if noqa:
    return
methods = ['assertTrue', 'assertFalse']
for method in methods:
    start = logical_line.find('.%s' % method) + 1
    if start != 0:
        break
else:
    return
comparisons = [ast.Eq, ast.NotEq]
checker = AssertTrueFalseChecker(methods, comparisons)
checker.visit(ast.parse(logical_line))
if checker.error:
    yield start, 'H204: Use assert(Not)Equal()'
def hacking_assert_equal(logical_line, noqa)
r"""Check that self.assertEqual and self.assertNotEqual are used. Okay: self.assertEqual(x, y) Okay: self.assertNotEqual(x, y) H204: self.assertTrue(x == y) H204: self.assertTrue(x != y) H204: self.assertFalse(x == y) H204: self.assertFalse(x != y)
5.355133
6.001068
0.892363
if noqa:
    return
methods = ['assertTrue', 'assertFalse']
for method in methods:
    start = logical_line.find('.%s' % method) + 1
    if start != 0:
        break
else:
    return
comparisons = [ast.Gt, ast.GtE, ast.Lt, ast.LtE]
checker = AssertTrueFalseChecker(methods, comparisons)
checker.visit(ast.parse(logical_line))
if checker.error:
    yield start, 'H205: Use assert{Greater,Less}[Equal]'
def hacking_assert_greater_less(logical_line, noqa)
r"""Check that self.assert{Greater,Less}[Equal] are used. Okay: self.assertGreater(x, y) Okay: self.assertGreaterEqual(x, y) Okay: self.assertLess(x, y) Okay: self.assertLessEqual(x, y) H205: self.assertTrue(x > y) H205: self.assertTrue(x >= y) H205: self.assertTrue(x < y) H205: self.assertTrue(x <= y)
4.96584
4.67711
1.061733
pos = physical_line.find('\r')
if pos != -1 and pos == (len(physical_line) - 2):
    return (pos, "H903: Windows style line endings not allowed in code")
def hacking_no_cr(physical_line)
r"""Check that we only use newlines not carriage returns. Okay: import os\nimport sys # pep8 doesn't yet replace \r in strings, will work on an # upstream fix H903 import os\r\nimport sys
9.049391
5.776707
1.566531
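A self-contained sketch of the H903 carriage-return check applied to raw physical lines:

def check_cr(physical_line):
    # Flag a carriage return sitting immediately before the trailing newline.
    pos = physical_line.find('\r')
    if pos != -1 and pos == (len(physical_line) - 2):
        return (pos, "H903: Windows style line endings not allowed in code")

print(check_cr("import os\r\n"))  # (9, 'H903: ...')
print(check_cr("import os\n"))    # None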
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
    return self.download_dataset_with_http_info(owner, id, **kwargs)
else:
    (data) = self.download_dataset_with_http_info(owner, id, **kwargs)
    return data
def download_dataset(self, owner, id, **kwargs)
Download dataset This endpoint will return a .zip containing all files within the dataset as originally uploaded. If you are interested retrieving clean data extracted from those files by data.world, check out `GET:/sql` and `GET:/sparql`. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.download_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: None If the method is called asynchronously, returns the request thread.
1.492003
1.561113
0.95573
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
    return self.download_file_with_http_info(owner, id, file, **kwargs)
else:
    (data) = self.download_file_with_http_info(owner, id, file, **kwargs)
    return data
def download_file(self, owner, id, file, **kwargs)
Download file This endpoint will return a file within the dataset as originally uploaded. If you are interested retrieving clean data extracted from those files by data.world, check out `GET:/sql` and `GET:/sparql`. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.download_file(owner, id, file, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str file: (required) :return: None If the method is called asynchronously, returns the request thread.
1.480058
1.571808
0.941628
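A minimal, self-contained sketch of the sync/async dispatch pattern these two methods share: with a callback the *_with_http_info helper runs on a thread and returns it, otherwise it runs inline and the data is returned. The helper and the file name here are stand-ins, not the generated client code.

import threading

def download_file_with_http_info(owner, id, file, **kwargs):
    # Stand-in for the generated HTTP helper.
    result = "bytes of %s/%s/%s" % (owner, id, file)
    callback = kwargs.get('callback')
    if callback:
        thread = threading.Thread(target=callback, args=(result,))
        thread.start()
        return thread
    return result

def download_file(owner, id, file, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return download_file_with_http_info(owner, id, file, **kwargs)
    else:
        data = download_file_with_http_info(owner, id, file, **kwargs)
        return data

# Hypothetical file name; owner/id values come from the docstring examples.
print(download_file('jonloyens', 'an-intro-to-dataworld-dataset', 'stats.csv'))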
if count is None:
    raise ValueError("Invalid value for `count`, must not be `None`")
if count is not None and count < 0:
    raise ValueError("Invalid value for `count`, must be a value greater than or equal to `0`")

self._count = count
def count(self, count)
Sets the count of this PaginatedDatasetResults. :param count: The count of this PaginatedDatasetResults. :type: int
1.741812
1.665085
1.04608
if type is None:
    raise ValueError("Invalid value for `type`, must not be `None`")
if type is not None and len(type) > 50:
    raise ValueError("Invalid value for `type`, length must be less than or equal to `50`")
if type is not None and not re.search('[\\x21-\\x7E \\t]*', type):
    raise ValueError("Invalid value for `type`, must be a follow pattern or equal to `/[\\x21-\\x7E \\t]*/`")

self._type = type
def type(self, type)
Sets the type of this WebAuthorization. The authorization scheme. Usually this is \"Bearer\" but it could be other values like \"Token\" or \"Basic\" etc. :param type: The type of this WebAuthorization. :type: str
1.96118
1.756863
1.116297
if credentials is not None and len(credentials) > 1024:
    raise ValueError("Invalid value for `credentials`, length must be less than or equal to `1024`")
if credentials is not None and len(credentials) < 1:
    raise ValueError("Invalid value for `credentials`, length must be greater than or equal to `1`")
if credentials is not None and not re.search('[\\x21-\\x7E \\t]*', credentials):
    raise ValueError("Invalid value for `credentials`, must be a follow pattern or equal to `/[\\x21-\\x7E \\t]*/`")

self._credentials = credentials
def credentials(self, credentials)
Sets the credentials of this WebAuthorization. The confidential portion of the `Authorization` header that follows the `type` field. This field is write-only. It is omitted by read operations. If authorization is required, the `credentials` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `credentials` field--the update will preserve the previous value. :param credentials: The credentials of this WebAuthorization. :type: str
1.803018
1.760103
1.024382
if resource is None:
    # Show simpler descriptor, omitting schema definitions
    simple_descriptor = copy.deepcopy(self._datapackage.descriptor)
    for resource in simple_descriptor['resources']:
        resource.pop('schema', None)
    return simple_descriptor
else:
    return self.__resources[resource].descriptor
def describe(self, resource=None)
Describe dataset or resource within dataset :param resource: The name of a specific resource (i.e. file or table) contained in the dataset. If ``resource`` is None, this method will describe the dataset itself. (Default value = None) :type resource: str, optional :returns: The descriptor of the dataset or of a specific resource, if ``resource`` is specified in the call. :rtype: dict
6.160264
6.397126
0.962974
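A hedged usage sketch, assuming this describe() lives on the data.world Python SDK's local dataset wrapper; the dataset slug is reused from the docstring examples earlier in this section:

import datadotworld as dw

dataset = dw.load_dataset('jonloyens/an-intro-to-dataworld-dataset')

# Descriptor for the whole dataset, with per-resource schemas omitted.
print(dataset.describe())

# Descriptor for a single resource, schema included.
first_resource = dataset.describe()['resources'][0]['name']
print(dataset.describe(resource=first_resource))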
# Instantiating the resource again as a simple `Resource` ensures that
# ``data`` will be returned as bytes.
upcast_resource = datapackage.Resource(
    self.__resources[resource_name].descriptor,
    default_base_path=self.__base_path)
return upcast_resource.data
def _load_raw_data(self, resource_name)
Extract raw data from resource :param resource_name:
11.492061
12.459993
0.922317