| body (string, 26 to 98.2k chars) | body_hash (int64, approx. -9.22e18 to 9.22e18) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 distinct value) | body_without_docstring (string, 20 to 98.2k chars) |
|---|---|---|---|---|---|---|---|
def __init__(self, **kwargs):
'\n Initialize a new cuckoo search problem.\n '
self.__upper_boundary = kwargs.get('upper_boundary', 4.0)
self.__lower_boundary = kwargs.get('lower_boundary', 0.0)
self.__alpha = kwargs.pop('alpha', 1)
self.__max_generations = kwargs.pop('max_generations',... | 7,076,526,501,844,270,000 | Initialize a new cuckoo search problem. | swarmlib/cuckoosearch/cuckoo_problem.py | __init__ | Geetha-github-cloud/swarmlib | python | def __init__(self, **kwargs):
'\n \n '
self.__upper_boundary = kwargs.get('upper_boundary', 4.0)
self.__lower_boundary = kwargs.get('lower_boundary', 0.0)
self.__alpha = kwargs.pop('alpha', 1)
self.__max_generations = kwargs.pop('max_generations', 10)
self.__lambda = kwargs.pop('la... |
def replay(self):
'\n Start the problems visualization.\n '
self.__visualizer.replay() | -2,730,549,339,398,622,700 | Start the problems visualization. | swarmlib/cuckoosearch/cuckoo_problem.py | replay | Geetha-github-cloud/swarmlib | python | def replay(self):
'\n \n '
self.__visualizer.replay() |
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
'A better wrapper over request for deferred signing'
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
... | -5,809,463,524,355,869,000 | A better wrapper over request for deferred signing | python/ccxt/base/exchange.py | fetch2 | tssujt/ccxt | python | def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['metho... |
def find_broadly_matched_key(self, broad, string):
'A helper method for matching error strings exactly vs broadly'
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if (string.find(key) >= 0):
return key
return None | 1,118,882,194,763,658,900 | A helper method for matching error strings exactly vs broadly | python/ccxt/base/exchange.py | find_broadly_matched_key | tssujt/ccxt | python | def find_broadly_matched_key(self, broad, string):
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if (string.find(key) >= 0):
return key
return None |
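For illustration, a minimal standalone sketch of how the broad-matching helper in the row above behaves; the error map and message below are made up, not taken from ccxt.

```python
# Standalone version mirroring the row above: return the first key that occurs in `string`.
def find_broadly_matched_key(broad, string):
    for key in broad:
        if string.find(key) >= 0:
            return key
    return None

# Hypothetical broad-error map and message, for illustration only.
broad = {'Insufficient funds': 'InsufficientFunds', 'Order not found': 'OrderNotFound'}
print(find_broadly_matched_key(broad, 'error: Insufficient funds for this order'))
# -> 'Insufficient funds'
```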
def fetch(self, url, method='GET', headers=None, body=None):
'Perform a HTTP request and return decoded JSON data'
request_headers = self.prepare_request_headers(headers)
url = (self.proxy + url)
if self.verbose:
print('\nRequest:', method, url, request_headers, body)
self.logger.debug('%s %... | 5,832,230,086,645,174,000 | Perform a HTTP request and return decoded JSON data | python/ccxt/base/exchange.py | fetch | tssujt/ccxt | python | def fetch(self, url, method='GET', headers=None, body=None):
request_headers = self.prepare_request_headers(headers)
url = (self.proxy + url)
if self.verbose:
print('\nRequest:', method, url, request_headers, body)
self.logger.debug('%s %s, Request: %s %s', method, url, request_headers, bod... |
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
'A helper-wrapper for the safe_value_2() family.'
value = method(dictionary, key1)
return (value if (value is not None) else method(dictionary, key2, default_value)) | -2,371,737,021,285,098,500 | A helper-wrapper for the safe_value_2() family. | python/ccxt/base/exchange.py | safe_either | tssujt/ccxt | python | @staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
value = method(dictionary, key1)
return (value if (value is not None) else method(dictionary, key2, default_value)) |
@staticmethod
def truncate(num, precision=0):
'Deprecated, use decimal_to_precision instead'
if (precision > 0):
decimal_precision = math.pow(10, precision)
return (math.trunc((num * decimal_precision)) / decimal_precision)
return int(Exchange.truncate_to_string(num, precision)) | 5,881,430,384,757,220,000 | Deprecated, use decimal_to_precision instead | python/ccxt/base/exchange.py | truncate | tssujt/ccxt | python | @staticmethod
def truncate(num, precision=0):
if (precision > 0):
decimal_precision = math.pow(10, precision)
return (math.trunc((num * decimal_precision)) / decimal_precision)
return int(Exchange.truncate_to_string(num, precision)) |
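A small usage sketch of the truncation arithmetic shown above, kept standalone (only the positive-precision branch; the zero-precision fallback via truncate_to_string is omitted here).

```python
import math

def truncate(num, precision=2):
    # Positive-precision branch of Exchange.truncate above:
    # scale up, drop the fractional part, scale back down.
    decimal_precision = math.pow(10, precision)
    return math.trunc(num * decimal_precision) / decimal_precision

print(truncate(1.2399, 2))   # -> 1.23 (extra digits are dropped, not rounded)
print(truncate(-0.129, 2))   # -> -0.12 (truncation moves toward zero)
```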
@staticmethod
def truncate_to_string(num, precision=0):
'Deprecated, todo: remove references from subclasses'
if (precision > 0):
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = (decimal_digits if len(dec... | -3,156,627,279,850,857,000 | Deprecated, todo: remove references from subclasses | python/ccxt/base/exchange.py | truncate_to_string | tssujt/ccxt | python | @staticmethod
def truncate_to_string(num, precision=0):
if (precision > 0):
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = (decimal_digits if len(decimal_digits) else '0')
return ((parts[0] + '.... |
def check_address(self, address):
'Checks an address is not the same character repeated or an empty sequence'
if (address is None):
self.raise_error(InvalidAddress, details='address is None')
if (all(((letter == address[0]) for letter in address)) or (len(address) < self.minFundingAddressLength) or ... | -2,909,175,738,945,414,700 | Checks an address is not the same character repeated or an empty sequence | python/ccxt/base/exchange.py | check_address | tssujt/ccxt | python | def check_address(self, address):
if (address is None):
self.raise_error(InvalidAddress, details='address is None')
if (all(((letter == address[0]) for letter in address)) or (len(address) < self.minFundingAddressLength) or (' ' in address)):
self.raise_error(InvalidAddress, details=(((('ad... |
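A rough standalone sketch of the validity checks visible above; the minimum length of 20 stands in for self.minFundingAddressLength (an assumption), and the sample values are illustrative only.

```python
def looks_invalid(address, min_length=20):
    # min_length is a placeholder for the exchange's minFundingAddressLength.
    if address is None:
        return True
    return (all(letter == address[0] for letter in address)  # same character repeated
            or len(address) < min_length                     # too short
            or ' ' in address)                               # contains whitespace

print(looks_invalid('aaaaaaaaaaaaaaaaaaaaaaaaaa'))              # -> True (repeated character)
print(looks_invalid('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa'))      # -> False
```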
def __init__(self, file):
' Init audio stream '
self.file = file | -1,504,669,398,592,276,500 | Init audio stream | AudioFile.py | __init__ | CoryXie/SpeechShadowing | python | def __init__(self, file):
' '
self.file = file |
def play(self):
' Play entire file '
utils.displayInfoMessage('Playing Audio')
pathparts = self.file.rsplit('.', 1)
fileformat = pathparts[1]
song = AudioSegment.from_file(self.file, format=fileformat)
play(song)
utils.displayInfoMessage('')
utils.displayErrorMessage('') | -74,452,650,981,497,420 | Play entire file | AudioFile.py | play | CoryXie/SpeechShadowing | python | def play(self):
' '
utils.displayInfoMessage('Playing Audio')
pathparts = self.file.rsplit('.', 1)
fileformat = pathparts[1]
song = AudioSegment.from_file(self.file, format=fileformat)
play(song)
utils.displayInfoMessage()
utils.displayErrorMessage() |
def send_commands(mqtt_client, command_topic, commands):
'Send a sequence of commands.'
backlog_topic = (command_topic + COMMAND_BACKLOG)
backlog = ';'.join([('NoDelay;%s %s' % command) for command in commands])
mqtt_client.publish(backlog_topic, backlog) | 2,324,701,565,265,341,000 | Send a sequence of commands. | hatasmota/mqtt.py | send_commands | ascillato/hatasmota | python | def send_commands(mqtt_client, command_topic, commands):
backlog_topic = (command_topic + COMMAND_BACKLOG)
backlog = ';'.join([('NoDelay;%s %s' % command) for command in commands])
mqtt_client.publish(backlog_topic, backlog) |
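To make the backlog construction above concrete, a small sketch with a stand-in MQTT client; the COMMAND_BACKLOG value, topic, and command tuples are illustrative assumptions, not taken from hatasmota.

```python
# Stand-in client that only records what would be published.
class FakeMqttClient:
    def publish(self, topic, payload):
        print(topic, '->', payload)

COMMAND_BACKLOG = 'Backlog'  # assumed constant value for this sketch

def send_commands(mqtt_client, command_topic, commands):
    # Same joining logic as the row above: each (command, argument) pair
    # becomes "NoDelay;<command> <argument>" and all pairs are chained with ';'.
    backlog_topic = command_topic + COMMAND_BACKLOG
    backlog = ';'.join('NoDelay;%s %s' % command for command in commands)
    mqtt_client.publish(backlog_topic, backlog)

send_commands(FakeMqttClient(), 'cmnd/tasmota_ABC123/', [('Power1', 'ON'), ('Dimmer', 50)])
# -> cmnd/tasmota_ABC123/Backlog -> NoDelay;Power1 ON;NoDelay;Dimmer 50
```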
def cancel(self):
'Cancel the timer.'
self._task.cancel() | 4,089,125,113,064,289,000 | Cancel the timer. | hatasmota/mqtt.py | cancel | ascillato/hatasmota | python | def cancel(self):
self._task.cancel() |
def __init__(self, publish, subscribe, unsubscribe):
'Initialize.'
self._pending_messages = {}
self._publish = publish
self._subscribe = subscribe
self._unsubscribe = unsubscribe | -6,452,863,049,671,550,000 | Initialize. | hatasmota/mqtt.py | __init__ | ascillato/hatasmota | python | def __init__(self, publish, subscribe, unsubscribe):
self._pending_messages = {}
self._publish = publish
self._subscribe = subscribe
self._unsubscribe = unsubscribe |
def publish(self, *args, **kwds):
'Publish a message.'
return self._publish(*args, **kwds) | -3,842,568,635,347,020,300 | Publish a message. | hatasmota/mqtt.py | publish | ascillato/hatasmota | python | def publish(self, *args, **kwds):
return self._publish(*args, **kwds) |
def publish_debounced(self, topic, payload, qos=None, retain=None):
'Publish a message, with debounce.'
msg = Message(topic, payload, qos, retain)
def publish_callback():
_LOGGER.debug('publish_debounced: publishing %s', msg)
self._pending_messages.pop(msg)
self.publish(msg.topic, m... | 7,393,002,072,308,514,000 | Publish a message, with debounce. | hatasmota/mqtt.py | publish_debounced | ascillato/hatasmota | python | def publish_debounced(self, topic, payload, qos=None, retain=None):
msg = Message(topic, payload, qos, retain)
def publish_callback():
_LOGGER.debug('publish_debounced: publishing %s', msg)
self._pending_messages.pop(msg)
self.publish(msg.topic, msg.payload, qos=msg.qos, retain=msg... |
async def subscribe(self, sub_state, topics):
'Subscribe to topics.'
return (await self._subscribe(sub_state, topics)) | 1,127,118,368,039,434,400 | Subscribe to topics. | hatasmota/mqtt.py | subscribe | ascillato/hatasmota | python | async def subscribe(self, sub_state, topics):
return (await self._subscribe(sub_state, topics)) |
async def unsubscribe(self, sub_state):
'Unsubscribe from topics.'
return (await self._unsubscribe(sub_state)) | -3,378,789,737,602,925,600 | Unsubscribe from topics. | hatasmota/mqtt.py | unsubscribe | ascillato/hatasmota | python | async def unsubscribe(self, sub_state):
return (await self._unsubscribe(sub_state)) |
def _reward(self, i, rewards, reward=1):
'\n Compute the reward to be given upon success\n '
for (j, a) in enumerate(self.agents):
if ((a.index == i) or (a.index == 0)):
rewards[j] += reward
if self.zero_sum:
if ((a.index != i) or (a.index == 0)):
... | -7,247,224,356,617,500,000 | Compute the reward to be given upon success | gym_multigrid/envs/collect_game.py | _reward | ArnaudFickinger/gym-multigrid | python | def _reward(self, i, rewards, reward=1):
'\n \n '
for (j, a) in enumerate(self.agents):
if ((a.index == i) or (a.index == 0)):
rewards[j] += reward
if self.zero_sum:
if ((a.index != i) or (a.index == 0)):
rewards[j] -= reward |
@classmethod
def host(cls) -> str:
' get the host of the url, so we can use the correct scraper '
raise NotImplementedError('This should be implemented.') | 1,255,193,424,983,882,800 | get the host of the url, so we can use the correct scraper | recipe_scrapers/_abstract.py | host | AlexRogalskiy/recipe-scrapers | python | @classmethod
def host(cls) -> str:
' '
raise NotImplementedError('This should be implemented.') |
def total_time(self):
' total time it takes to preparate the recipe in minutes '
raise NotImplementedError('This should be implemented.') | -7,147,276,316,743,142,000 | total time it takes to preparate the recipe in minutes | recipe_scrapers/_abstract.py | total_time | AlexRogalskiy/recipe-scrapers | python | def total_time(self):
' '
raise NotImplementedError('This should be implemented.') |
def yields(self):
' The number of servings or items in the recipe '
raise NotImplementedError('This should be implemented.') | -5,047,820,617,410,046,000 | The number of servings or items in the recipe | recipe_scrapers/_abstract.py | yields | AlexRogalskiy/recipe-scrapers | python | def yields(self):
' '
raise NotImplementedError('This should be implemented.') |
def language(self):
'\n Human language the recipe is written in.\n\n May be overridden by individual scrapers.\n '
candidate_languages = OrderedDict()
html = self.soup.find('html', {'lang': True})
candidate_languages[html.get('lang')] = True
meta_language = (self.soup.find('meta... | -5,964,747,132,220,465,000 | Human language the recipe is written in.
May be overridden by individual scrapers. | recipe_scrapers/_abstract.py | language | AlexRogalskiy/recipe-scrapers | python | def language(self):
'\n Human language the recipe is written in.\n\n May be overridden by individual scrapers.\n '
candidate_languages = OrderedDict()
html = self.soup.find('html', {'lang': True})
candidate_languages[html.get('lang')] = True
meta_language = (self.soup.find('meta... |
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
'Computes sigmoid cross entropy given `logits`.\n\n Measures the probability error in discrete classification tasks in which each\n class is independent and not mutually exclusive. For instance, one could\n perform multilabel classification wher... | -2,597,133,487,863,943,000 | Computes sigmoid cross entropy given `logits`.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity,... | tensorflow/python/ops/nn.py | sigmoid_cross_entropy_with_logits | AdityaPai2398/tensorflow | python | def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
'Computes sigmoid cross entropy given `logits`.\n\n Measures the probability error in discrete classification tasks in which each\n class is independent and not mutually exclusive. For instance, one could\n perform multilabel classification wher... |
def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None):
'Computes a weighted cross entropy.\n\n This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`,\n allows one to trade off recall and precision by up- or down-weighting the\n cost of a positive error relative to a... | 8,742,524,507,999,195,000 | Computes a weighted cross entropy.
This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`,
allows one to trade off recall and precision by up- or down-weighting the
cost of a positive error relative to a negative error.
The usual cross-entropy cost is defined as:
targets * -log(sigmoid(logits)... | tensorflow/python/ops/nn.py | weighted_cross_entropy_with_logits | AdityaPai2398/tensorflow | python | def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None):
'Computes a weighted cross entropy.\n\n This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`,\n allows one to trade off recall and precision by up- or down-weighting the\n cost of a positive error relative to a... |
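The docstring above is truncated; as a rough illustration of the weighting it describes, here is a plain NumPy sketch of the naive weighted cross-entropy, with pos_weight scaling the positive term. This is not the numerically stable formulation TensorFlow actually uses.

```python
import numpy as np

def naive_weighted_ce(logits, targets, pos_weight):
    # Naive form of the cost described in the docstring:
    #   pos_weight * targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits))
    # Real implementations rewrite this to avoid overflow for large |logits|.
    p = 1.0 / (1.0 + np.exp(-logits))
    return pos_weight * targets * -np.log(p) + (1 - targets) * -np.log(1 - p)

print(naive_weighted_ce(np.array([0.5, -1.0]), np.array([1.0, 0.0]), pos_weight=2.0))
```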
def relu_layer(x, weights, biases, name=None):
'Computes Relu(x * weight + biases).\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optio... | -4,549,435,547,551,919,000 | Computes Relu(x * weight + biases).
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"nn_relu_layer" is used.
Returns:
A 2... | tensorflow/python/ops/nn.py | relu_layer | AdityaPai2398/tensorflow | python | def relu_layer(x, weights, biases, name=None):
'Computes Relu(x * weight + biases).\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optio... |
def l2_normalize(x, dim, epsilon=1e-12, name=None):
'Normalizes along dimension `dim` using an L2 norm.\n\n For a 1-D tensor with `dim = 0`, computes\n\n output = x / sqrt(max(sum(x**2), epsilon))\n\n For `x` with more dimensions, independently normalizes each 1-D slice along\n dimension `dim`.\n\n Args:\n... | -620,941,079,581,741,000 | Normalizes along dimension `dim` using an L2 norm.
For a 1-D tensor with `dim = 0`, computes
output = x / sqrt(max(sum(x**2), epsilon))
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `dim`.
Args:
x: A `Tensor`.
dim: Dimension along which to normalize.
epsilon: A lowe... | tensorflow/python/ops/nn.py | l2_normalize | AdityaPai2398/tensorflow | python | def l2_normalize(x, dim, epsilon=1e-12, name=None):
'Normalizes along dimension `dim` using an L2 norm.\n\n For a 1-D tensor with `dim = 0`, computes\n\n output = x / sqrt(max(sum(x**2), epsilon))\n\n For `x` with more dimensions, independently normalizes each 1-D slice along\n dimension `dim`.\n\n Args:\n... |
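A quick NumPy check of the 1-D formula quoted in the docstring above; this is a sketch of that single case only, while the real op normalizes slices along an arbitrary dimension.

```python
import numpy as np

def l2_normalize_1d(x, epsilon=1e-12):
    # Follows the formula quoted in the docstring for the 1-D case:
    #   output = x / sqrt(max(sum(x**2), epsilon))
    return x / np.sqrt(max(np.sum(x ** 2), epsilon))

v = np.array([3.0, 4.0])
print(l2_normalize_1d(v))                   # -> [0.6 0.8]
print(np.linalg.norm(l2_normalize_1d(v)))   # -> 1.0 (unit length after normalization)
```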
def zero_fraction(value, name=None):
"Returns the fraction of zeros in `value`.\n\n If `value` is empty, the result is `nan`.\n\n This is useful in summaries to measure and report sparsity. For example,\n\n z = tf.Relu(...)\n summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z))\n\n Args:\n ... | 8,074,424,809,428,103,000 | Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`.
This is useful in summaries to measure and report sparsity. For example,
z = tf.Relu(...)
summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z))
Args:
value: A tensor of numeric type.
name: A name for the operatio... | tensorflow/python/ops/nn.py | zero_fraction | AdityaPai2398/tensorflow | python | def zero_fraction(value, name=None):
"Returns the fraction of zeros in `value`.\n\n If `value` is empty, the result is `nan`.\n\n This is useful in summaries to measure and report sparsity. For example,\n\n z = tf.Relu(...)\n summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z))\n\n Args:\n ... |
def depthwise_conv2d(input, filter, strides, padding, name=None):
"Depthwise 2-D convolution.\n\n Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\n and a filter tensor of shape\n `[filter_height, filter_width, in_channels, channel_multiplier]`\n containing `in_channels` convolutional ... | -9,087,105,612,821,949,000 | Depthwise 2-D convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
applies a different filter to each input chan... | tensorflow/python/ops/nn.py | depthwise_conv2d | AdityaPai2398/tensorflow | python | def depthwise_conv2d(input, filter, strides, padding, name=None):
"Depthwise 2-D convolution.\n\n Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\n and a filter tensor of shape\n `[filter_height, filter_width, in_channels, channel_multiplier]`\n containing `in_channels` convolutional ... |
def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None):
"2-D convolution with separable filters.\n\n Performs a depthwise convolution that acts separately on channels followed by\n a pointwise convolution that mixes channels. Note that this is separability\n between dimensi... | 9,064,386,940,410,162,000 | 2-D convolution with separable filters.
Performs a depthwise convolution that acts separately on channels followed by
a pointwise convolution that mixes channels. Note that this is separability
between dimensions `[1, 2]` and `3`, not spatial separability between
dimensions `1` and `2`.
In detail,
output[b, i, ... | tensorflow/python/ops/nn.py | separable_conv2d | AdityaPai2398/tensorflow | python | def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None):
"2-D convolution with separable filters.\n\n Performs a depthwise convolution that acts separately on channels followed by\n a pointwise convolution that mixes channels. Note that this is separability\n between dimensi... |
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
"Calculate the sufficient statistics for the mean and variance of `x`.\n\n These sufficient statistics are computed using the one pass algorithm on\n an input that's optionally shifted. See:\n https://en.wikipedia.org/wiki/Algorithms_for... | -59,927,612,229,581,570 | Calculate the sufficient statistics for the mean and variance of `x`.
These sufficient statistics are computed using the one pass algorithm on
an input that's optionally shifted. See:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data
Args:
x: A `Tensor`.
axes: Array of ints.... | tensorflow/python/ops/nn.py | sufficient_statistics | AdityaPai2398/tensorflow | python | def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
"Calculate the sufficient statistics for the mean and variance of `x`.\n\n These sufficient statistics are computed using the one pass algorithm on\n an input that's optionally shifted. See:\n https://en.wikipedia.org/wiki/Algorithms_for... |
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
'Calculate the mean and variance of based on the sufficient statistics.\n\n Args:\n counts: A `Tensor` containing a the total count of the data (one value).\n mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly\n ... | 6,797,078,140,429,583,000 | Calculate the mean and variance of based on the sufficient statistics.
Args:
counts: A `Tensor` containing a the total count of the data (one value).
mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
shifted) sum of the elements to average over.
variance_ss: A `Tensor` containing t... | tensorflow/python/ops/nn.py | normalize_moments | AdityaPai2398/tensorflow | python | def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
'Calculate the mean and variance of based on the sufficient statistics.\n\n Args:\n counts: A `Tensor` containing a the total count of the data (one value).\n mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly\n ... |
def moments(x, axes, shift=None, name=None, keep_dims=False):
'Calculate the mean and variance of `x`.\n\n The mean and variance are calculated by aggregating the contents of `x`\n across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean\n and variance of a vector.\n\n When using these moments for b... | -2,044,667,341,312,066,600 | Calculate the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
When using these moments for batch normalization (see
`tf.nn.batch_normalization`):
* for so-called "global n... | tensorflow/python/ops/nn.py | moments | AdityaPai2398/tensorflow | python | def moments(x, axes, shift=None, name=None, keep_dims=False):
'Calculate the mean and variance of `x`.\n\n The mean and variance are calculated by aggregating the contents of `x`\n across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean\n and variance of a vector.\n\n When using these moments for b... |
def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None):
"Batch normalization.\n\n As described in http://arxiv.org/abs/1502.03167.\n Normalizes a tensor by `mean` and `variance`, and applies (optionally) a\n `scale` \\\\(\\gamma\\\\) to it, as well as an `offset` \\\\(\\beta\\\\):... | 4,443,138,785,886,978,000 | Batch normalization.
As described in http://arxiv.org/abs/1502.03167.
Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
`scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):
\\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)
`mean`, `variance`, `offset` and `scale` are all expected to be of on... | tensorflow/python/ops/nn.py | batch_normalization | AdityaPai2398/tensorflow | python | def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None):
"Batch normalization.\n\n As described in http://arxiv.org/abs/1502.03167.\n Normalizes a tensor by `mean` and `variance`, and applies (optionally) a\n `scale` \\\\(\\gamma\\\\) to it, as well as an `offset` \\\\(\\beta\\\\):... |
def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None):
'Batch normalization.\n\n This op is deprecated. See `tf.nn.batch_normalization`.\n\n Args:\n t: A 4D input Tensor.\n m: A 1D mean Tensor with size matching the last dimension of t.\n ... | 4,882,801,512,902,475,000 | Batch normalization.
This op is deprecated. See `tf.nn.batch_normalization`.
Args:
t: A 4D input Tensor.
m: A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.
v: A 1D variance Tensor with size matching the last dim... | tensorflow/python/ops/nn.py | batch_norm_with_global_normalization | AdityaPai2398/tensorflow | python | def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None):
'Batch normalization.\n\n This op is deprecated. See `tf.nn.batch_normalization`.\n\n Args:\n t: A 4D input Tensor.\n m: A 1D mean Tensor with size matching the last dimension of t.\n ... |
def _sum_rows(x):
'Returns a vector summing up each row of the matrix x.'
cols = array_ops.shape(x)[1]
ones_shape = array_ops.pack([cols, 1])
ones = array_ops.ones(ones_shape, x.dtype)
return array_ops.reshape(math_ops.matmul(x, ones), [(- 1)]) | 1,137,400,891,671,356,800 | Returns a vector summing up each row of the matrix x. | tensorflow/python/ops/nn.py | _sum_rows | AdityaPai2398/tensorflow | python | def _sum_rows(x):
cols = array_ops.shape(x)[1]
ones_shape = array_ops.pack([cols, 1])
ones = array_ops.ones(ones_shape, x.dtype)
return array_ops.reshape(math_ops.matmul(x, ones), [(- 1)]) |
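The matmul-with-ones trick above can be checked in a few lines of NumPy: multiplying an (m, n) matrix by an (n, 1) column of ones sums each row, matching a plain row-wise sum.

```python
import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])
ones = np.ones((x.shape[1], 1))
print((x @ ones).reshape(-1))  # -> [ 6. 15.]
print(x.sum(axis=1))           # -> [ 6. 15.] (same result)
```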
def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy='mod', name=None):
'Helper function for nce_loss and sampled_softmax_loss functions.\n\n Computes sampled output training log... | 3,862,293,874,763,613,000 | Helper function for nce_loss and sampled_softmax_loss functions.
Computes sampled output training logits and labels suitable for implementing
e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
sampled_softmax_loss).
Note: In the case where num_true > 1, we assign to each target class
the target ... | tensorflow/python/ops/nn.py | _compute_sampled_logits | AdityaPai2398/tensorflow | python | def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy='mod', name=None):
'Helper function for nce_loss and sampled_softmax_loss functions.\n\n Computes sampled output training log... |
def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy='mod', name='nce_loss'):
'Computes and returns the noise-contrastive estimation training loss.\n\n See [Noise-contrastive estimation: A new estimation principle ... | 5,876,890,148,579,109,000 | Computes and returns the noise-contrastive estimation training loss.
See [Noise-contrastive estimation: A new estimation principle for
unnormalized statistical models]
(http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
Also see our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_s... | tensorflow/python/ops/nn.py | nce_loss | AdityaPai2398/tensorflow | python | def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy='mod', name='nce_loss'):
'Computes and returns the noise-contrastive estimation training loss.\n\n See [Noise-contrastive estimation: A new estimation principle ... |
def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy='mod', name='sampled_softmax_loss'):
'Computes and returns the sampled softmax training loss.\n\n This is a faster way to train a softmax classifier o... | -82,977,646,637,382,370 | Computes and returns the sampled softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
At inference time, you can compute full softmax probabilities with the
expression ... | tensorflow/python/ops/nn.py | sampled_softmax_loss | AdityaPai2398/tensorflow | python | def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy='mod', name='sampled_softmax_loss'):
'Computes and returns the sampled softmax training loss.\n\n This is a faster way to train a softmax classifier o... |
def get_data(img_pth: Union[(str, os.PathLike)]) -> dict:
'Get a single data from the given file.json path'
with open(img_pth, 'r') as f:
data = json.load(f)
return data | 6,528,922,999,603,494,000 | Get a single data from the given file.json path | analyze_dataset.py | get_data | PDillis/coiltraine | python | def get_data(img_pth: Union[(str, os.PathLike)]) -> dict:
with open(img_pth, 'r') as f:
data = json.load(f)
return data |
def get_original_df(path: Union[(str, os.PathLike)], filename: str, processes_per_cpu: int=2) -> Tuple[(pd.DataFrame, bool)]:
'Get a DataFrame from all the can_bus*.json files in the dataset'
save_path = os.path.join(os.getcwd(), 'data_analysis', filename)
if os.path.isfile(save_path):
print('.npy f... | -2,909,380,231,971,924,000 | Get a DataFrame from all the can_bus*.json files in the dataset | analyze_dataset.py | get_original_df | PDillis/coiltraine | python | def get_original_df(path: Union[(str, os.PathLike)], filename: str, processes_per_cpu: int=2) -> Tuple[(pd.DataFrame, bool)]:
save_path = os.path.join(os.getcwd(), 'data_analysis', filename)
if os.path.isfile(save_path):
print('.npy file exists, loading it...')
data = list(np.load(save_path... |
def get_augmented_df(preloads_name: str) -> Tuple[(pd.DataFrame, bool)]:
"Use the preloads file to load the data; will be augmented, as that's what we did"
assert preloads_name.endswith('.npy')
data = np.load(os.path.join(os.getcwd(), '_preloads', preloads_name), allow_pickle=True)[1]
df = pd.DataFrame(... | 6,811,287,361,663,459,000 | Use the preloads file to load the data; will be augmented, as that's what we did | analyze_dataset.py | get_augmented_df | PDillis/coiltraine | python | def get_augmented_df(preloads_name: str) -> Tuple[(pd.DataFrame, bool)]:
assert preloads_name.endswith('.npy')
data = np.load(os.path.join(os.getcwd(), '_preloads', preloads_name), allow_pickle=True)[1]
df = pd.DataFrame(data)
print(df.describe())
return (df, True) |
def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None:
'Save violin plot for the interesting parameters using df'
directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0}
def set_lines(ax):
for l in ax.lines:
l.set_linestyl... | 3,672,524,993,753,016,000 | Save violin plot for the interesting parameters using df | analyze_dataset.py | violin_plot | PDillis/coiltraine | python | def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None:
directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0}
def set_lines(ax):
for l in ax.lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set... |
def plot_clients(path: Union[(str, os.PathLike)], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None:
'Plot the steer, throttle, brake, and speed of a client during its data collection'
if path.endswith(os.sep):
path = path[:(- 1)]
dataset_name = os.path.basename(path)
s_path = os.p... | -3,650,115,691,062,344,700 | Plot the steer, throttle, brake, and speed of a client during its data collection | analyze_dataset.py | plot_clients | PDillis/coiltraine | python | def plot_clients(path: Union[(str, os.PathLike)], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None:
if path.endswith(os.sep):
path = path[:(- 1)]
dataset_name = os.path.basename(path)
s_path = os.path.join(os.getcwd(), 'data_analysis', dataset_name, 'clients')
os.makedirs(s_p... |
def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[(List[int], List[float])]:
'Get the index and directions from the df of the actions taken by the client'
df['directions_str'] = df['directions'].astype(str)
df['change'] = (df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['... | -2,207,295,983,396,975,400 | Get the index and directions from the df of the actions taken by the client | analyze_dataset.py | get_change_locs | PDillis/coiltraine | python | def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[(List[int], List[float])]:
df['directions_str'] = df['directions'].astype(str)
df['change'] = (df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str'])
index_change = list(df.loc[(df['change'] == True)].inde... |
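A minimal illustration of the shift-and-compare idiom used above to locate the rows where 'directions' changes; the data is made up, and the fill value uses .iloc[0] (a scalar) as a slight simplification of the original's .head(1).

```python
import pandas as pd

df = pd.DataFrame({'directions': [2.0, 2.0, 3.0, 3.0, 5.0]})
df['directions_str'] = df['directions'].astype(str)
# A row is marked as a change point when it differs from the previous row.
df['change'] = df['directions_str'].shift(1, fill_value=df['directions_str'].iloc[0]) != df['directions_str']
print(df.loc[df['change']].index.tolist())  # -> [2, 4]: indices where a new direction starts
```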
def t_NUMBER(t):
'[0-9]+'
return t | -5,521,826,655,453,105,000 | [0-9]+ | py_lex.py | t_NUMBER | Spico197/PythonCompilerPrinciplesExp | python | def t_NUMBER(t):
return t |
def t_PRINT(t):
'print'
return t | -3,596,005,817,379,416,000 | print | py_lex.py | t_PRINT | Spico197/PythonCompilerPrinciplesExp | python | def t_PRINT(t):
return t |
def t_IF(t):
'if'
return t | 2,975,524,291,271,362,600 | if | py_lex.py | t_IF | Spico197/PythonCompilerPrinciplesExp | python | def t_IF(t):
return t |
def t_WHILE(t):
'while'
return t | -8,815,080,414,704,908,000 | while | py_lex.py | t_WHILE | Spico197/PythonCompilerPrinciplesExp | python | def t_WHILE(t):
return t |
def t_FOR(t):
'for'
return t | -2,868,480,328,159,569,400 | for | py_lex.py | t_FOR | Spico197/PythonCompilerPrinciplesExp | python | def t_FOR(t):
return t |
def t_LEN(t):
'len'
return t | 995,836,586,919,926,800 | len | py_lex.py | t_LEN | Spico197/PythonCompilerPrinciplesExp | python | def t_LEN(t):
return t |
def t_INC(t):
'\\+\\+'
return t | 4,309,525,618,600,526,300 | \+\+ | py_lex.py | t_INC | Spico197/PythonCompilerPrinciplesExp | python | def t_INC(t):
'\\+\\+'
return t |
def t_GDIV(t):
'//'
return t | 153,917,572,362,196,000 | // | py_lex.py | t_GDIV | Spico197/PythonCompilerPrinciplesExp | python | def t_GDIV(t):
return t |
def t_BREAK(t):
'break'
return t | 5,680,340,504,264,076,000 | break | py_lex.py | t_BREAK | Spico197/PythonCompilerPrinciplesExp | python | def t_BREAK(t):
return t |
def t_LET(t):
'<='
return t | -8,775,522,863,221,156,000 | <= | py_lex.py | t_LET | Spico197/PythonCompilerPrinciplesExp | python | def t_LET(t):
return t |
def t_ELIF(t):
'elif'
return t | -4,815,384,646,013,666,000 | elif | py_lex.py | t_ELIF | Spico197/PythonCompilerPrinciplesExp | python | def t_ELIF(t):
return t |
def t_ELSE(t):
'else'
return t | -4,633,063,001,006,124,000 | else | py_lex.py | t_ELSE | Spico197/PythonCompilerPrinciplesExp | python | def t_ELSE(t):
return t |
def t_VARIABLE(t):
'[a-zA-Z_]+'
return t | 2,083,747,938,742,166,500 | [a-zA-Z_]+ | py_lex.py | t_VARIABLE | Spico197/PythonCompilerPrinciplesExp | python | def t_VARIABLE(t):
return t |
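The t_* functions in the rows above look like PLY (Python Lex-Yacc) token rules, where the function's docstring is the token's regular expression; this reading is inferred from the naming convention and docstring-as-pattern style, not stated in the data. A minimal self-contained lexer for comparison (requires the ply package):

```python
import ply.lex as lex

tokens = ('NUMBER', 'PLUS')

def t_NUMBER(t):
    r'[0-9]+'
    # The docstring above is the regex; the function can post-process the match.
    t.value = int(t.value)
    return t

t_PLUS = r'\+'      # simple tokens can also be plain regex strings
t_ignore = ' \t'    # characters skipped between tokens

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('12+34')
for tok in lexer:
    print(tok.type, tok.value)  # NUMBER 12, PLUS +, NUMBER 34
```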
def run_task(task_message: str, command: str) -> None:
'Run a task in the shell, defined by a task message and its associated\n command.'
print(blue_bold(task_message))
print(light(f'$ {command}'))
subprocess.call(command, shell=True)
print() | -3,654,012,546,749,389,000 | Run a task in the shell, defined by a task message and its associated
command. | check_commit.py | run_task | Cocopyth/foodshare | python | def run_task(task_message: str, command: str) -> None:
'Run a task in the shell, defined by a task message and its associated\n command.'
print(blue_bold(task_message))
print(light(f'$ {command}'))
subprocess.call(command, shell=True)
print() |
def _uniqueColumns(self):
'\n raise exception if column names (cnames) are not unique\n '
if (len(set(self.table[0])) != len(self.table[0])):
raise Exception('Column names not unique') | -7,075,752,451,378,640,000 | raise exception if column names (cnames) are not unique | TableData.py | _uniqueColumns | mokko/Py-TableData | python | def _uniqueColumns(self):
'\n \n '
if (len(set(self.table[0])) != len(self.table[0])):
raise Exception('Column names not unique') |
def load_table(path, verbose=None):
'\n File extension aware ingester\n\n td=TableData.load_table(path)\n \n This is an alternative to _init_. Is this pythonic enough? \n '
ext = os.path.splitext(path)[1][1:]
return TableData(ext, path, verbose) | -6,098,475,671,010,790,000 | File extension aware ingester
td=TableData.load_table(path)
This is an alternative to _init_. Is this pythonic enough? | TableData.py | load_table | mokko/Py-TableData | python | def load_table(path, verbose=None):
'\n File extension aware ingester\n\n td=TableData.load_table(path)\n \n This is an alternative to _init_. Is this pythonic enough? \n '
ext = os.path.splitext(path)[1][1:]
return TableData(ext, path, verbose) |
def XLRDParser(self, infile):
"\n Parses old excel file into tableData object. Only first sheet.\n\n Dont use this directly, use \n td=TableData('xsl', infile)\n td=TableData.load=table(infile)\n instead\n \n xlrd uses UTF16. What comes out of here?\n ... | -2,494,149,109,274,382,000 | Parses old excel file into tableData object. Only first sheet.
Dont use this directly, use
td=TableData('xsl', infile)
td=TableData.load=table(infile)
instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/tr... | TableData.py | XLRDParser | mokko/Py-TableData | python | def XLRDParser(self, infile):
"\n Parses old excel file into tableData object. Only first sheet.\n\n Dont use this directly, use \n td=TableData('xsl', infile)\n td=TableData.load=table(infile)\n instead\n \n xlrd uses UTF16. What comes out of here?\n ... |
def ncols(self):
'\n Returns integer with number of columns in table data\n '
return len(self.table[0]) | -1,986,639,562,952,319,500 | Returns integer with number of columns in table data | TableData.py | ncols | mokko/Py-TableData | python | def ncols(self):
'\n \n '
return len(self.table[0]) |
def nrows(self):
'\n Returns integer with number of rows in table data\n '
return len(self.table) | 3,428,862,989,251,994,600 | Returns integer with number of rows in table data | TableData.py | nrows | mokko/Py-TableData | python | def nrows(self):
'\n \n '
return len(self.table) |
def cell(self, col, row):
"\n Return a cell for col,row.\n td.cell(col,row)\n\n Throws exception if col or row are not integer or out of range.\n What happens on empty cell?\n \n I stick to x|y format, although row|col might be more pythonic.\n \n Empty ce... | 5,786,433,765,263,158,000 | Return a cell for col,row.
td.cell(col,row)
Throws exception if col or row are not integer or out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None. | TableData.py | cell | mokko/Py-TableData | python | def cell(self, col, row):
"\n Return a cell for col,row.\n td.cell(col,row)\n\n Throws exception if col or row are not integer or out of range.\n What happens on empty cell?\n \n I stick to x|y format, although row|col might be more pythonic.\n \n Empty ce... |
def cindex(self, needle):
"\n Returns the column index (c) for column name 'needle'.\n \n Throws 'not in list' if 'needle' is not a column name (cname).\n "
return self.table[0].index(needle) | -5,242,650,936,641,615,000 | Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname). | TableData.py | cindex | mokko/Py-TableData | python | def cindex(self, needle):
"\n Returns the column index (c) for column name 'needle'.\n \n Throws 'not in list' if 'needle' is not a column name (cname).\n "
return self.table[0].index(needle) |
def search(self, needle):
'\n Returns list of cells [cid,rid] that contain the needle.\n r=td.search(needle) # (1,1)\n \n \n tuples, lists? I am not quite sure! \n '
results = []
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
... | -9,116,779,920,000,777,000 | Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure! | TableData.py | search | mokko/Py-TableData | python | def search(self, needle):
'\n Returns list of cells [cid,rid] that contain the needle.\n r=td.search(needle) # (1,1)\n \n \n tuples, lists? I am not quite sure! \n '
results = []
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
... |
def search_col(self, cname, needle):
'\n Returns list/set of rows that contain the needle for the given col.\n td.search(cname, needle)\n '
results = ()
c = cindex(cname)
for rid in range(0, self.nrows()):
if (needle in self.cell(c, rid)):
results.append(rid) | 5,397,219,895,814,539,000 | Returns list/set of rows that contain the needle for the given col.
td.search(cname, needle) | TableData.py | search_col | mokko/Py-TableData | python | def search_col(self, cname, needle):
'\n Returns list/set of rows that contain the needle for the given col.\n td.search(cname, needle)\n '
results = ()
c = cindex(cname)
for rid in range(0, self.nrows()):
if (needle in self.cell(c, rid)):
results.append(rid) |
def show(self):
'\n print representation of table\n \n Really print? Why not.\n '
for row in self.table:
print(row)
print(('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))) | 6,122,015,028,421,865,000 | print representation of table
Really print? Why not. | TableData.py | show | mokko/Py-TableData | python | def show(self):
'\n print representation of table\n \n Really print? Why not.\n '
for row in self.table:
print(row)
print(('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))) |
def delRow(self, r):
'\n Drop a row by number.\n \n Need to remake the index to cover the hole.\n '
self.table.pop(r) | -463,386,055,434,054,660 | Drop a row by number.
Need to remake the index to cover the hole. | TableData.py | delRow | mokko/Py-TableData | python | def delRow(self, r):
'\n Drop a row by number.\n \n Need to remake the index to cover the hole.\n '
self.table.pop(r) |
def delCol(self, cname):
'\n Drop a column by cname\n \n (Not tested.)\n '
c = self.cindex(cname)
for r in range(0, self.nrows()):
self.table[r].pop(c) | 726,440,151,422,467,200 | Drop a column by cname
(Not tested.) | TableData.py | delCol | mokko/Py-TableData | python | def delCol(self, cname):
'\n Drop a column by cname\n \n (Not tested.)\n '
c = self.cindex(cname)
for r in range(0, self.nrows()):
self.table[r].pop(c) |
def addCol(self, name):
'\n Add a new column called name at the end of the row. \n Cells with be empty.\n\n Returns the cid of the new column, same as cindex(cname).\n '
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid... | 1,757,653,220,642,044,200 | Add a new column called name at the end of the row.
Cells with be empty.
Returns the cid of the new column, same as cindex(cname). | TableData.py | addCol | mokko/Py-TableData | python | def addCol(self, name):
'\n Add a new column called name at the end of the row. \n Cells with be empty.\n\n Returns the cid of the new column, same as cindex(cname).\n '
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid... |
def delCellAIfColBEq(self, cnameA, cnameB, needle):
'\n empty cell in column cnameA if value in column cnameB equals needle in every row\n \n untested\n '
colA = self.cindex(cnameA)
colB = self.cindex(cnameB)
for rid in range(1, self.nrows()):
if (self.table[rid][colB... | 4,673,846,665,272,713,000 | empty cell in column cnameA if value in column cnameB equals needle in every row
untested | TableData.py | delCellAIfColBEq | mokko/Py-TableData | python | def delCellAIfColBEq(self, cnameA, cnameB, needle):
'\n empty cell in column cnameA if value in column cnameB equals needle in every row\n \n untested\n '
colA = self.cindex(cnameA)
colB = self.cindex(cnameB)
for rid in range(1, self.nrows()):
if (self.table[rid][colB... |
def delRowIfColContains(self, cname, needle):
"\n Delete row if column equals the value 'needle'\n\n Should we use cname or c (colId)?\n "
col = self.cindex(cname)
r = (self.nrows() - 1)
while (r > 1):
cell = self.cell(r, col)
if (needle in str(cell)):
se... | 2,724,569,938,249,150,500 | Delete row if column equals the value 'needle'
Should we use cname or c (colId)? | TableData.py | delRowIfColContains | mokko/Py-TableData | python | def delRowIfColContains(self, cname, needle):
"\n Delete row if column equals the value 'needle'\n\n Should we use cname or c (colId)?\n "
col = self.cindex(cname)
r = (self.nrows() - 1)
while (r > 1):
cell = self.cell(r, col)
if (needle in str(cell)):
se... |
def renameCol(self, cnameOld, cnameNew):
'\n renames column cnameOld into cnameNew\n '
c = self.cindex(cnameOld)
self.table[0][c] = cnameNew | 150,716,984,456,689,950 | renames column cnameOld into cnameNew | TableData.py | renameCol | mokko/Py-TableData | python | def renameCol(self, cnameOld, cnameNew):
'\n \n '
c = self.cindex(cnameOld)
self.table[0][c] = cnameNew |
def default_per_col(cname, default_value):
"\n Default Value: if cell is empty replace with default value\n self.default_per_col ('status', 'filled')\n "
cid = td.cindex(cname)
for rid in range(1, td.nrows()):
if (not td.cell(cid, rid)):
self.table[rid][cid] = de... | 3,105,138,167,014,666,000 | Default Value: if cell is empty replace with default value
self.default_per_col ('status', 'filled') | TableData.py | default_per_col | mokko/Py-TableData | python | def default_per_col(cname, default_value):
"\n Default Value: if cell is empty replace with default value\n self.default_per_col ('status', 'filled')\n "
cid = td.cindex(cname)
for rid in range(1, td.nrows()):
if (not td.cell(cid, rid)):
self.table[rid][cid] = de... |
def write(self, out):
'\n write to file with extension-awareness\n '
ext = os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML(out)
elif (ext == 'csv'):
self.writeCSV(out)
elif (ext == 'json'):
self.writeJSON(out)
else:
print(('Form... | -4,998,446,517,376,200,000 | write to file with extension-awareness | TableData.py | write | mokko/Py-TableData | python | def write(self, out):
'\n \n '
ext = os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML(out)
elif (ext == 'csv'):
self.writeCSV(out)
elif (ext == 'json'):
self.writeJSON(out)
else:
print(('Format %s not recognized' % ext)) |
def writeCSV(self, outfile):
'\n writes data in tableData object to outfile in csv format\n \n Values with commas are quoted. \n '
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialec... | 2,598,109,210,853,169,000 | writes data in tableData object to outfile in csv format
Values with commas are quoted. | TableData.py | writeCSV | mokko/Py-TableData | python | def writeCSV(self, outfile):
'\n writes data in tableData object to outfile in csv format\n \n Values with commas are quoted. \n '
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline=, encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect=... |
def writeXML(self, out):
'\n writes table data to file out in xml format\n '
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element('tdx')
self._outTest(out)
def _indent(elem, level=0):
i = ('\n' + (level * ' '))
if len(elem):
... | -4,343,073,205,336,348,700 | writes table data to file out in xml format | TableData.py | writeXML | mokko/Py-TableData | python | def writeXML(self, out):
'\n \n '
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element('tdx')
self._outTest(out)
def _indent(elem, level=0):
i = ('\n' + (level * ' '))
if len(elem):
if ((not elem.text) or (not elem.t... |
def writeJSON(self, out):
"\n Writes table data in json to file out\n \n JSON doesn't have date type, hence default=str\n "
import json
self._outTest(out)
f = open(out, 'w')
with f as outfile:
json.dump(self.table, outfile, default=str)
self.verbose(('json wri... | -8,355,916,370,640,608,000 | Writes table data in json to file out
JSON doesn't have date type, hence default=str | TableData.py | writeJSON | mokko/Py-TableData | python | def writeJSON(self, out):
"\n Writes table data in json to file out\n \n JSON doesn't have date type, hence default=str\n "
import json
self._outTest(out)
f = open(out, 'w')
with f as outfile:
json.dump(self.table, outfile, default=str)
self.verbose(('json wri... |
def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs):
'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all available Team... | 7,479,452,620,593,596,000 | Add users to a team by batch # noqa: E501
Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use "Get all available Teams." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP req... | pycherwell/api/teams_api.py | teams_add_user_to_team_by_batch_v1 | greenpau/pycherwell | python | def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs):
'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all available Team... |
def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs):
'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all... | -6,559,755,752,125,099,000 | Add users to a team by batch # noqa: E501
Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use "Get all available Teams." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP req... | pycherwell/api/teams_api.py | teams_add_user_to_team_by_batch_v1_with_http_info | greenpau/pycherwell | python | def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs):
'Add users to a team by batch # noqa: E501\n\n Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team\'s internal ID, use "Get all... |
def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: ... | 5,251,959,941,015,588,000 | Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP... | pycherwell/api/teams_api.py | teams_add_user_to_team_v1 | greenpau/pycherwell | python | def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: ... |
def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Te... | -369,934,815,964,429,950 | Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP... | pycherwell/api/teams_api.py | teams_add_user_to_team_v1_with_http_info | greenpau/pycherwell | python | def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Te... |
def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: ... | -794,480,889,220,743,600 | Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP... | pycherwell/api/teams_api.py | teams_add_user_to_team_v2 | greenpau/pycherwell | python | def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Teams." # noqa: ... |
def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Te... | 6,207,243,840,170,479,000 | Add a user to a team # noqa: E501
Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP... | pycherwell/api/teams_api.py | teams_add_user_to_team_v2_with_http_info | greenpau/pycherwell | python | def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs):
'Add a user to a team # noqa: E501\n\n Operation to add a user to a Team. To get the user\'s internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team\'s internal ID, use "Get all available Te... |
def teams_delete_team_v1(self, teamid, **kwargs):
'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_d... | -7,163,944,717,466,169,000 | Delete a Team # noqa: E501
Operation to delete a Team by Team ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_delete_team_v1(teamid, async_req=True)
>>> result = thread.get()
:param async_req bool: exec... | pycherwell/api/teams_api.py | teams_delete_team_v1 | greenpau/pycherwell | python | def teams_delete_team_v1(self, teamid, **kwargs):
'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.teams_d... |
def teams_delete_team_v1_with_http_info(self, teamid, **kwargs):
'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> threa... | -2,856,693,656,659,794,000 | Delete a Team # noqa: E501
Operation to delete a Team by Team ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_delete_team_v1_with_http_info(teamid, async_req=True)
>>> result = thread.get()
:param async... | pycherwell/api/teams_api.py | teams_delete_team_v1_with_http_info | greenpau/pycherwell | python | def teams_delete_team_v1_with_http_info(self, teamid, **kwargs):
'Delete a Team # noqa: E501\n\n Operation to delete a Team by Team ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> threa... |
def teams_get_team_v1(self, teamid, **kwargs):
'Get a team by its TeamId # noqa: E501\n\n Operation to get Team Info for a single Team using its Team ID. To get a Team\'s internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or Team... | 1,252,965,615,686,424,600 | Get a team by its TeamId # noqa: E501
Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501
This meth... | pycherwell/api/teams_api.py | teams_get_team_v1 | greenpau/pycherwell | python | def teams_get_team_v1(self, teamid, **kwargs):
'Get a team by its TeamId # noqa: E501\n\n Operation to get Team Info for a single Team using its Team ID. To get a Team\'s internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or Team... |
def teams_get_team_v1_with_http_info(self, teamid, **kwargs):
'Get a team by its TeamId # noqa: E501\n\n Operation to get Team Info for a single Team using its Team ID. To get a Team\'s internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM ... | -6,890,980,933,143,454,000 | Get a team by its TeamId # noqa: E501
Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501
This meth... | pycherwell/api/teams_api.py | teams_get_team_v1_with_http_info | greenpau/pycherwell | python | def teams_get_team_v1_with_http_info(self, teamid, **kwargs):
'Get a team by its TeamId # noqa: E501\n\n Operation to get Team Info for a single Team using its Team ID. To get a Team\'s internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM ... |
def teams_get_teams_v1(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thr... | -6,170,501,011,833,153,000 | Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v1(async_req=True)
>>> result = thread.get()
:param asy... | pycherwell/api/teams_api.py | teams_get_teams_v1 | greenpau/pycherwell | python | def teams_get_teams_v1(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thr... |
def teams_get_teams_v1_with_http_info(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n... | -2,534,725,942,223,735,000 | Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v1_with_http_info(async_req=True)
>>> result = thread.ge... | pycherwell/api/teams_api.py | teams_get_teams_v1_with_http_info | greenpau/pycherwell | python | def teams_get_teams_v1_with_http_info(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n... |
def teams_get_teams_v2(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thr... | 8,171,048,766,474,264,000 | Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v2(async_req=True)
>>> result = thread.get()
:param asy... | pycherwell/api/teams_api.py | teams_get_teams_v2 | greenpau/pycherwell | python | def teams_get_teams_v2(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thr... |
def teams_get_teams_v2_with_http_info(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n... | -1,948,090,591,928,988,000 | Get all available Teams # noqa: E501
Operation to get IDs and names for all available Teams. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_teams_v2_with_http_info(async_req=True)
>>> result = thread.ge... | pycherwell/api/teams_api.py | teams_get_teams_v2_with_http_info | greenpau/pycherwell | python | def teams_get_teams_v2_with_http_info(self, **kwargs):
'Get all available Teams # noqa: E501\n\n Operation to get IDs and names for all available Teams. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n... |
def teams_get_users_teams_v1(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by d... | -6,103,791,973,675,825,000 | Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = ... | pycherwell/api/teams_api.py | teams_get_users_teams_v1 | greenpau/pycherwell | python | def teams_get_users_teams_v1(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by d... |
def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HT... | 4,781,741,475,574,834,000 | Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = ... | pycherwell/api/teams_api.py | teams_get_users_teams_v1_with_http_info | greenpau/pycherwell | python | def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HT... |
def teams_get_users_teams_v2(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by d... | -232,844,477,654,339,100 | Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = ... | pycherwell/api/teams_api.py | teams_get_users_teams_v2 | greenpau/pycherwell | python | def teams_get_users_teams_v2(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HTTP request by d... |
def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HT... | 6,970,159,966,320,876,000 | Get Team assignments for a user # noqa: E501
Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = ... | pycherwell/api/teams_api.py | teams_get_users_teams_v2_with_http_info | greenpau/pycherwell | python | def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs):
'Get Team assignments for a user # noqa: E501\n\n Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501\n This method makes a synchronous HT... |
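The `teams_get_users_teams_v1`/`v2` rows above take a `user_record_id` and return the Team assignments for that user. A hedged sketch under the same assumed generated-client layout, with error handling via the client's `ApiException`:

```python
# Hedged sketch: fetching Team assignments for a user (teams_get_users_teams_v2).
# Client class names assume the standard OpenAPI-generator layout for pycherwell.
import pycherwell
from pycherwell.rest import ApiException

configuration = pycherwell.Configuration()
configuration.host = "https://cherwell.example.com/CherwellAPI"  # hypothetical host
teams_api = pycherwell.TeamsApi(pycherwell.ApiClient(configuration))

user_record_id = "93d9abdb6242a0f9fdaba646..."  # hypothetical user record ID

try:
    # Returns the Teams the user belongs to.
    response = teams_api.teams_get_users_teams_v2(user_record_id)
    print(response)
except ApiException as e:
    print("Exception when calling TeamsApi->teams_get_users_teams_v2: %s" % e)
```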
def teams_get_workgroups_v1(self, **kwargs):
'Get all available Workgroups # noqa: E501\n\n Operation to get IDs and names for all available Workgroups. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n... | -36,594,141,104,595,010 | Get all available Workgroups # noqa: E501
Operation to get IDs and names for all available Workgroups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_workgroups_v1(async_req=True)
>>> result = thread.ge... | pycherwell/api/teams_api.py | teams_get_workgroups_v1 | greenpau/pycherwell | python | def teams_get_workgroups_v1(self, **kwargs):
'Get all available Workgroups # noqa: E501\n\n Operation to get IDs and names for all available Workgroups. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n... |
def teams_get_workgroups_v1_with_http_info(self, **kwargs):
'Get all available Workgroups # noqa: E501\n\n Operation to get IDs and names for all available Workgroups. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass a... | -671,416,137,976,133,400 | Get all available Workgroups # noqa: E501
Operation to get IDs and names for all available Workgroups. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.teams_get_workgroups_v1_with_http_info(async_req=True)
>>> res... | pycherwell/api/teams_api.py | teams_get_workgroups_v1_with_http_info | greenpau/pycherwell | python | def teams_get_workgroups_v1_with_http_info(self, **kwargs):
'Get all available Workgroups # noqa: E501\n\n Operation to get IDs and names for all available Workgroups. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass a... |
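All of the Teams-API rows above share one calling convention: each method is synchronous by default, and passing `async_req=True` makes it return immediately with a thread-like handle whose `get()` yields the result (as shown in the docstrings, e.g. `>>> thread = api.teams_get_teams_v1(async_req=True)`). The sketch below shows both styles against the parameterless list operations; the client setup again assumes the standard generated layout and a hypothetical host.

```python
# Hedged sketch: synchronous vs. async_req=True calls, following the pattern
# documented in the docstrings above. Configuration/ApiClient/TeamsApi names
# assume the standard OpenAPI-generator layout for pycherwell.
import pycherwell

configuration = pycherwell.Configuration()
configuration.host = "https://cherwell.example.com/CherwellAPI"  # hypothetical host
teams_api = pycherwell.TeamsApi(pycherwell.ApiClient(configuration))

# Synchronous call: blocks until the response is available.
teams = teams_api.teams_get_teams_v1()

# Asynchronous call: returns a handle immediately; collect the result with .get().
thread = teams_api.teams_get_workgroups_v1(async_req=True)
workgroups = thread.get()

print(teams, workgroups)
```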