| body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
|---|---|---|---|---|---|---|---|
def getFonts(self, filterByFontFormat=[], variableFont=None):
'\n Calculate list of fonts of this package by applying filters for\n font.format and font.variableFont (possibly more in the future)\n '
def passedFilter(font):
passed1 = ((not filterByFontFormat) or (filterByFontFormat... | 7,343,162,613,261,906,000 | Calculate list of fonts of this package by applying filters for
font.format and font.variableFont (possibly more in the future) | Lib/typeworld/api/__init__.py | getFonts | typeWorld/api | python | def getFonts(self, filterByFontFormat=[], variableFont=None):
'\n Calculate list of fonts of this package by applying filters for\n font.format and font.variableFont (possibly more in the future)\n '
def passedFilter(font):
passed1 = ((not filterByFontFormat) or (filterByFontFormat... |
def getLicense(self):
' Returns the ::License:: object that this font references.\n '
return self.parent.parent.parent.getLicenseByKeyword(self.keyword) | -8,534,528,834,294,439,000 | Returns the ::License:: object that this font references. | Lib/typeworld/api/__init__.py | getLicense | typeWorld/api | python | def getLicense(self):
' \n '
return self.parent.parent.parent.getLicenseByKeyword(self.keyword) |
def isFontSpecific(self):
' Returns True if this version is defined at the font level.\n Returns False if this version is defined at the family level.\n '
return issubclass(self.parent.__class__, Font) | -7,631,871,248,832,800,000 | Returns True if this version is defined at the font level.
Returns False if this version is defined at the family level. | Lib/typeworld/api/__init__.py | isFontSpecific | typeWorld/api | python | def isFontSpecific(self):
' Returns True if this version is defined at the font level.\n Returns False if this version is defined at the family level.\n '
return issubclass(self.parent.__class__, Font) |
def filename(self, version):
' Returns the recommended font file name to be used to store the font on disk.\n\n It is composed of the font’s uniqueID, its version string and the file\n extension. Together, they must not exceed 220 characters.\n '
if (not (type(version) in (str, int, ... | -8,905,214,114,146,957,000 | Returns the recommended font file name to be used to store the font on disk.
It is composed of the font’s uniqueID, its version string and the file
extension. Together, they must not exceed 220 characters. | Lib/typeworld/api/__init__.py | filename | typeWorld/api | python | def filename(self, version):
' Returns the recommended font file name to be used to store the font on disk.\n\n It is composed of the font’s uniqueID, its version string and the file\n extension. Together, they must not exceed 220 characters.\n '
if (not (type(version) in (str, int, ... |
def getBillboardURLs(self):
' Returns a list of billboardURLs compiled from ::Font.billboardURLs::\n and ::Family.billboardURLs::, giving the font-level definitions priority\n over family-level definitions.\n '
return (self.billboardURLs or self.parent.billboardURLs) | 5,772,845,123,857,825,000 | Returns a list of billboardURLs compiled from ::Font.billboardURLs::
and ::Family.billboardURLs::, giving the font-level definitions priority
over family-level definitions. | Lib/typeworld/api/__init__.py | getBillboardURLs | typeWorld/api | python | def getBillboardURLs(self):
' Returns a list of billboardURLs compiled from ::Font.billboardURLs::\n and ::Family.billboardURLs::, giving the font-level definitions priority\n over family-level definitions.\n '
return (self.billboardURLs or self.parent.billboardURLs) |
def getVersions(self):
' Returns list of ::Version:: objects.\n\n This is the final list based on the version information in this font object as\n well as in its parent ::Family:: object. Please read the section about\n [versioning](#versioning) above.\n '
if (not self.hasVers... | 4,009,952,125,952,453,600 | Returns list of ::Version:: objects.
This is the final list based on the version information in this font object as
well as in its parent ::Family:: object. Please read the section about
[versioning](#versioning) above. | Lib/typeworld/api/__init__.py | getVersions | typeWorld/api | python | def getVersions(self):
' Returns list of ::Version:: objects.\n\n This is the final list based on the version information in this font object as\n well as in its parent ::Family:: object. Please read the section about\n [versioning](#versioning) above.\n '
if (not self.hasVers... |
def getDesigners(self):
' Returns a list of ::Designer:: objects that this font references.\n These are the combination of family-level designers and font-level designers.\n The same logic as for versioning applies.\n Please read the section about [versioning](#versioning) above.\n ... | -3,829,965,687,416,815,600 | Returns a list of ::Designer:: objects that this font references.
These are the combination of family-level designers and font-level designers.
The same logic as for versioning applies.
Please read the section about [versioning](#versioning) above. | Lib/typeworld/api/__init__.py | getDesigners | typeWorld/api | python | def getDesigners(self):
' Returns a list of ::Designer:: objects that this font references.\n These are the combination of family-level designers and font-level designers.\n The same logic as for versioning applies.\n Please read the section about [versioning](#versioning) above.\n ... |
def getAllDesigners(self):
' Returns a list of ::Designer:: objects that represent all of the designers\n referenced both at the family level as well as with all the family’s fonts,\n in case the fonts carry specific designers. This could be used to give a\n one-glance overview of all de... | -1,701,254,645,675,247,600 | Returns a list of ::Designer:: objects that represent all of the designers
referenced both at the family level as well as with all the family’s fonts,
in case the fonts carry specific designers. This could be used to give a
one-glance overview of all designers involved. | Lib/typeworld/api/__init__.py | getAllDesigners | typeWorld/api | python | def getAllDesigners(self):
' Returns a list of ::Designer:: objects that represent all of the designers\n referenced both at the family level as well as with all the family’s fonts,\n in case the fonts carry specific designers. This could be used to give a\n one-glance overview of all de... |
def forward(self, s, state=None, info={}):
's -> Q(s, \\*)'
(logits, h) = self.preprocess(s, state)
logits = F.softmax(self.last(logits), dim=(- 1))
return (logits, h) | 6,090,292,889,799,149,000 | s -> Q(s, \*) | tianshou/utils/net/discrete.py | forward | FightingSrain/tianshou | python | def forward(self, s, state=None, info={}):
's -> Q(s, \\*)'
(logits, h) = self.preprocess(s, state)
logits = F.softmax(self.last(logits), dim=(- 1))
return (logits, h) |
def forward(self, s, **kwargs):
's -> V(s)'
(logits, h) = self.preprocess(s, state=kwargs.get('state', None))
logits = self.last(logits)
return logits | -5,762,130,964,247,468,000 | s -> V(s) | tianshou/utils/net/discrete.py | forward | FightingSrain/tianshou | python | def forward(self, s, **kwargs):
(logits, h) = self.preprocess(s, state=kwargs.get('state', None))
logits = self.last(logits)
return logits |
def forward(self, x, state=None, info={}):
'x -> Q(x, \\*)'
if (not isinstance(x, torch.Tensor)):
x = torch.tensor(x, device=self.device, dtype=torch.float32)
x = x.permute(0, 3, 1, 2)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(... | -6,293,112,128,924,184,000 | x -> Q(x, \*) | tianshou/utils/net/discrete.py | forward | FightingSrain/tianshou | python | def forward(self, x, state=None, info={}):
'x -> Q(x, \\*)'
if (not isinstance(x, torch.Tensor)):
x = torch.tensor(x, device=self.device, dtype=torch.float32)
x = x.permute(0, 3, 1, 2)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(... |
def test_set_tokens_credentials(client):
'Test setting the tokens using credentials'
client.refresh_token = None
del client.session.headers['Authorization']
client._set_tokens()
assert client.refresh_token
assert ('Authorization' in client.session.headers) | -8,691,619,730,239,429,000 | Test setting the tokens using credentials | tests/test_client.py | test_set_tokens_credentials | MuckRock/python-documentcloud | python | def test_set_tokens_credentials(client):
client.refresh_token = None
del client.session.headers['Authorization']
client._set_tokens()
assert client.refresh_token
assert ('Authorization' in client.session.headers) |
def test_set_tokens_refresh(client):
'Test setting the tokens using refresh token'
client.refresh_token = None
del client.session.headers['Authorization']
client._set_tokens()
client._set_tokens()
assert client.refresh_token
assert ('Authorization' in client.session.headers) | 2,197,476,233,905,266,400 | Test setting the tokens using refresh token | tests/test_client.py | test_set_tokens_refresh | MuckRock/python-documentcloud | python | def test_set_tokens_refresh(client):
client.refresh_token = None
del client.session.headers['Authorization']
client._set_tokens()
client._set_tokens()
assert client.refresh_token
assert ('Authorization' in client.session.headers) |
def test_set_tokens_none(public_client):
'Test setting the tokens with no credentials'
public_client._set_tokens()
assert (public_client.refresh_token is None)
assert ('Authorization' not in public_client.session.headers) | -8,181,585,842,802,609,000 | Test setting the tokens with no credentials | tests/test_client.py | test_set_tokens_none | MuckRock/python-documentcloud | python | def test_set_tokens_none(public_client):
public_client._set_tokens()
assert (public_client.refresh_token is None)
assert ('Authorization' not in public_client.session.headers) |
def test_get_tokens(client):
'Test getting access and refresh tokens using valid credentials'
(access, refresh) = client._get_tokens(client.username, client.password)
assert access
assert refresh | 1,387,383,972,610,002,000 | Test getting access and refresh tokens using valid credentials | tests/test_client.py | test_get_tokens | MuckRock/python-documentcloud | python | def test_get_tokens(client):
(access, refresh) = client._get_tokens(client.username, client.password)
assert access
assert refresh |
def test_get_tokens_bad_credentials(client):
'Test getting access and refresh tokens using invalid credentials'
with pytest.raises(CredentialsFailedError):
client._get_tokens(client.username, 'foo') | -6,442,918,922,323,646,000 | Test getting access and refresh tokens using invalid credentials | tests/test_client.py | test_get_tokens_bad_credentials | MuckRock/python-documentcloud | python | def test_get_tokens_bad_credentials(client):
with pytest.raises(CredentialsFailedError):
client._get_tokens(client.username, 'foo') |
def test_refresh_tokens(client):
'Test refreshing the tokens'
(access, refresh) = client._refresh_tokens(client.refresh_token)
assert access
assert refresh | -4,345,584,591,935,057,000 | Test refreshing the tokens | tests/test_client.py | test_refresh_tokens | MuckRock/python-documentcloud | python | def test_refresh_tokens(client):
(access, refresh) = client._refresh_tokens(client.refresh_token)
assert access
assert refresh |
def _parse_response(self, response):
'\n Parse http raw response into python\n dictionary object.\n\n :param str response: http response\n :returns: response dict\n :rtype: dict\n '
response_dict = {}
for line in response.splitlines():
(key, value) = response... | -4,143,355,550,308,036,600 | Parse http raw response into python
dictionary object.
:param str response: http response
:returns: response dict
:rtype: dict | sendsms/backends/esendex.py | _parse_response | codesankalp/django-sendsms | python | def _parse_response(self, response):
'\n Parse http raw response into python\n dictionary object.\n\n :param str response: http response\n :returns: response dict\n :rtype: dict\n '
response_dict = {}
for line in response.splitlines():
(key, value) = response... |
def _send(self, message):
'\n Private method to send one message.\n\n :param SmsMessage message: SmsMessage class instance.\n :returns: True if message is sent else False\n :rtype: bool\n '
params = {'EsendexUsername': self.get_username(), 'EsendexPassword': self.get_password(... | 7,000,168,824,280,457,000 | Private method to send one message.
:param SmsMessage message: SmsMessage class instance.
:returns: True if message is sent else False
:rtype: bool | sendsms/backends/esendex.py | _send | codesankalp/django-sendsms | python | def _send(self, message):
'\n Private method to send one message.\n\n :param SmsMessage message: SmsMessage class instance.\n :returns: True if message is sent else False\n :rtype: bool\n '
params = {'EsendexUsername': self.get_username(), 'EsendexPassword': self.get_password(... |
def send_messages(self, messages):
'\n Send messages.\n\n :param list messages: List of SmsMessage instances.\n :returns: number of messages sent successfully.\n :rtype: int\n '
counter = 0
for message in messages:
res = self._send(message)
if res:
... | 5,847,916,432,418,928,000 | Send messages.
:param list messages: List of SmsMessage instances.
:returns: number of messages sent successfully.
:rtype: int | sendsms/backends/esendex.py | send_messages | codesankalp/django-sendsms | python | def send_messages(self, messages):
'\n Send messages.\n\n :param list messages: List of SmsMessage instances.\n :returns: number of messages sent successfully.\n :rtype: int\n '
counter = 0
for message in messages:
res = self._send(message)
if res:
... |
@commands.slash_command(name='layer')
async def _layer(self, inter: disnake.AppCmdInter, templates: str):
'Layer several templates.\n\n Parameters\n ----------\n templates: List of templates (URL or name) separated by a space (last goes above).\n '
(await inter.response.defer())
... | -8,327,733,782,223,394,000 | Layer several templates.
Parameters
----------
templates: List of templates (URL or name) separated by a space (last goes above). | src/cogs/pxls_template/layer.py | _layer | GrayTurtles/Clueless | python | @commands.slash_command(name='layer')
async def _layer(self, inter: disnake.AppCmdInter, templates: str):
'Layer several templates.\n\n Parameters\n ----------\n templates: List of templates (URL or name) separated by a space (last goes above).\n '
(await inter.response.defer())
... |
@conf
def find_cuda_libs(self):
"\n\tfind the cuda include and library folders\n\n\tuse ctx.program(source='main.c', target='app', use='CUDA CUDART')\n\t"
if (not self.env.NVCC):
self.fatal('check for nvcc first')
d = self.root.find_node(self.env.NVCC[0]).parent.parent
node = d.find_node('includ... | 7,435,091,890,794,082,000 | find the cuda include and library folders
use ctx.program(source='main.c', target='app', use='CUDA CUDART') | Firmware/ardupilot/modules/waf/playground/cuda/cuda.py | find_cuda_libs | eanswer/LearningToFly | python | @conf
def find_cuda_libs(self):
"\n\tfind the cuda include and library folders\n\n\tuse ctx.program(source='main.c', target='app', use='CUDA CUDART')\n\t"
if (not self.env.NVCC):
self.fatal('check for nvcc first')
d = self.root.find_node(self.env.NVCC[0]).parent.parent
node = d.find_node('includ... |
def arguments_window(args: Namespace) -> ArgumentsResults:
'Window interface\n\n :param args: the arguments passed from the command line\n :return: Tuple[Union[str, None], Namespace] - The new arguments\n '
filename: str = ((C.SAVE_FILE_DIR + args.lottery_type) + C.SAVE_FILE_TYPE)
layout = [[sg.Tex... | 910,027,326,538,831,600 | Window interface
:param args: the arguments passed from the command line
:return: Tuple[Union[str, None], Namespace] - The new arguments | gui_arguments.py | arguments_window | bernduwiesner/GenLottery | python | def arguments_window(args: Namespace) -> ArgumentsResults:
'Window interface\n\n :param args: the arguments passed from the command line\n :return: Tuple[Union[str, None], Namespace] - The new arguments\n '
filename: str = ((C.SAVE_FILE_DIR + args.lottery_type) + C.SAVE_FILE_TYPE)
layout = [[sg.Tex... |
def calculate_perplexity(models, coefs, data):
'\n Calculate perplexity with given model\n :param models: language models\n :param coefs: coefficients\n :param data: test data\n :return: perplexity\n '
pp = 0
uniform_prob = []
unigram_prob = []
bigram_prob = []
trigram_prob = [... | -6,578,478,251,837,124,000 | Calculate perplexity with given model
:param models: language models
:param coefs: coefficients
:param data: test data
:return: perplexity | lm.py | calculate_perplexity | alvisdeng/NLP-Language-Model | python | def calculate_perplexity(models, coefs, data):
'\n Calculate perplexity with given model\n :param models: language models\n :param coefs: coefficients\n :param data: test data\n :return: perplexity\n '
pp = 0
uniform_prob = []
unigram_prob = []
bigram_prob = []
trigram_prob = [... |
def parse_args():
'\n Parse input positional arguments from command line\n :return: args - parsed arguments\n '
parser = argparse.ArgumentParser('N-gram Language Model')
parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float)
parser.add_argument('coef_uni', help=... | -2,590,715,522,766,917,000 | Parse input positional arguments from command line
:return: args - parsed arguments | lm.py | parse_args | alvisdeng/NLP-Language-Model | python | def parse_args():
'\n Parse input positional arguments from command line\n :return: args - parsed arguments\n '
parser = argparse.ArgumentParser('N-gram Language Model')
parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float)
parser.add_argument('coef_uni', help=... |
def __init__(self, corpus, ngram, min_freq, uniform=False):
'\n Initialize language model\n :param corpus: input text corpus to build LM on\n :param ngram: number of n-gram, e.g. 1, 2, 3, ...\n :param min_freq: minimum frequency threshold to set a word to UNK placeholder\n ... | 3,727,620,931,636,181,500 | Initialize language model
:param corpus: input text corpus to build LM on
:param ngram: number of n-gram, e.g. 1, 2, 3, ...
:param min_freq: minimum frequency threshold to set a word to UNK placeholder
set to 1 to not use this threshold
:param uniform: boolean flag, set to True to indicate this model i... | lm.py | __init__ | alvisdeng/NLP-Language-Model | python | def __init__(self, corpus, ngram, min_freq, uniform=False):
'\n Initialize language model\n :param corpus: input text corpus to build LM on\n :param ngram: number of n-gram, e.g. 1, 2, 3, ...\n :param min_freq: minimum frequency threshold to set a word to UNK placeholder\n ... |
def build(self):
'\n Build LM from text corpus\n '
if self.uniform:
self.uniform_table = get_uniform_tables(self.V)
elif (self.ngram == 1):
self.unigram_table = get_unigram_tables(self.V, self.N, self.counter_1gram, self.word_to_idx)
elif (self.ngram == 2):
self.cor... | -3,804,000,836,486,489,000 | Build LM from text corpus | lm.py | build | alvisdeng/NLP-Language-Model | python | def build(self):
'\n \n '
if self.uniform:
self.uniform_table = get_uniform_tables(self.V)
elif (self.ngram == 1):
self.unigram_table = get_unigram_tables(self.V, self.N, self.counter_1gram, self.word_to_idx)
elif (self.ngram == 2):
self.corpus_2gram = [(self.corpus... |
def most_common_words(self, k):
'\n Return the top-k most frequent n-grams and their frequencies in sorted order.\n For uniform models, the frequency should be "1" for each token.\n\n Your return should be sorted in descending order of frequency.\n Sort according to ascending alphabet or... | -1,959,674,398,925,063,700 | Return the top-k most frequent n-grams and their frequencies in sorted order.
For uniform models, the frequency should be "1" for each token.
Your return should be sorted in descending order of frequency.
Sort according to ascending alphabet order when multiple words have same frequency.
:return: list[tuple(token, fre... | lm.py | most_common_words | alvisdeng/NLP-Language-Model | python | def most_common_words(self, k):
'\n Return the top-k most frequent n-grams and their frequencies in sorted order.\n For uniform models, the frequency should be "1" for each token.\n\n Your return should be sorted in descending order of frequency.\n Sort according to ascending alphabet or... |
@option(Configs.model)
def autoregressive_model(c: Configs):
'\n ### Initialize the auto-regressive model\n '
from labml_nn.transformers.xl import RelativeMultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
m = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransfo... | 5,798,183,949,863,375,000 | ### Initialize the auto-regressive model | labml_nn/transformers/compressive/experiment.py | autoregressive_model | Aarsh2001/annotated_deep_learning_paper_implementations | python | @option(Configs.model)
def autoregressive_model(c: Configs):
'\n \n '
from labml_nn.transformers.xl import RelativeMultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
m = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransformer(CompressiveTransformerLayer(d_model... |
@option(Configs.attention_reconstruction_loss)
def attention_reconstruction_loss(c: Configs):
'\n ### Initialize the attention reconstruction loss\n '
return AttentionReconstructionLoss(c.model.transformer.layers) | -6,834,483,399,557,530,000 | ### Initialize the attention reconstruction loss | labml_nn/transformers/compressive/experiment.py | attention_reconstruction_loss | Aarsh2001/annotated_deep_learning_paper_implementations | python | @option(Configs.attention_reconstruction_loss)
def attention_reconstruction_loss(c: Configs):
'\n \n '
return AttentionReconstructionLoss(c.model.transformer.layers) |
def main():
'\n ### Run the experiment\n '
experiment.create(name='compressive_transformer', comment='')
conf = Configs()
experiment.configs(conf, {'tokenizer': 'character', 'text': 'tiny_shakespeare', 'optimizer.learning_rate': 0.00025, 'optimizer.optimizer': 'AdamW', 'prompt': 'It is', 'prompt_s... | 6,623,430,949,124,413,000 | ### Run the experiment | labml_nn/transformers/compressive/experiment.py | main | Aarsh2001/annotated_deep_learning_paper_implementations | python | def main():
'\n \n '
experiment.create(name='compressive_transformer', comment='')
conf = Configs()
experiment.configs(conf, {'tokenizer': 'character', 'text': 'tiny_shakespeare', 'optimizer.learning_rate': 0.00025, 'optimizer.optimizer': 'AdamW', 'prompt': 'It is', 'prompt_separator': '', 'train_load...
@torch.no_grad()
def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) -> Tuple[(CompressedMemory, List[torch.Tensor])]:
'\n Concatenate new memories and compress the oldest memories.\n '
if ((self.mem_len == 0) and (self.c_mem_len == 0)):
return (CompressedMe... | 1,156,103,335,448,820,500 | Concatenate new memories and compress the oldest memories. | labml_nn/transformers/compressive/experiment.py | merge_compress_memory | Aarsh2001/annotated_deep_learning_paper_implementations | python | @torch.no_grad()
def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) -> Tuple[(CompressedMemory, List[torch.Tensor])]:
'\n \n '
if ((self.mem_len == 0) and (self.c_mem_len == 0)):
return (CompressedMemory([], []), [])
if (mem is not None):
(mem, ... |
def step(self, batch: any, batch_idx: BatchIndex):
'\n ### Training/validation step\n '
(data, target) = (batch[0].to(self.device), batch[1].to(self.device))
if self.mode.is_train:
tracker.add_global_step((data.shape[0] * data.shape[1]))
with self.mode.update(is_log_activations=bat... | -1,364,598,047,004,983,000 | ### Training/validation step | labml_nn/transformers/compressive/experiment.py | step | Aarsh2001/annotated_deep_learning_paper_implementations | python | def step(self, batch: any, batch_idx: BatchIndex):
'\n \n '
(data, target) = (batch[0].to(self.device), batch[1].to(self.device))
if self.mode.is_train:
tracker.add_global_step((data.shape[0] * data.shape[1]))
with self.mode.update(is_log_activations=batch_idx.is_last):
mem... |
def sample(self):
'\n ### Sampling function to generate samples periodically while training\n '
prompt = self.prompt
log = [(prompt, Text.subtle)]
mem = CompressedMemory([], [])
for i in monit.iterate('Sample', 25):
data = self.text.text_to_i(prompt).unsqueeze((- 1))
da... | 6,554,604,733,984,911,000 | ### Sampling function to generate samples periodically while training | labml_nn/transformers/compressive/experiment.py | sample | Aarsh2001/annotated_deep_learning_paper_implementations | python | def sample(self):
'\n \n '
prompt = self.prompt
log = [(prompt, Text.subtle)]
mem = CompressedMemory([], [])
for i in monit.iterate('Sample', 25):
data = self.text.text_to_i(prompt).unsqueeze((- 1))
data = data.to(self.device)
(output, new_mem) = self.model(data... |
def scores(self, scaling):
"Compute site and species scores for different scalings.\n\n Parameters\n ----------\n scaling : int\n\n For a more detailed explanation of the interpretation, check\n Legendre & Legendre 1998, section 9.4.3. The notes that\n follow ar... | -3,771,814,060,755,760,600 | Compute site and species scores for different scalings.
Parameters
----------
scaling : int
For a more detailed explanation of the interpretation, check
Legendre & Legendre 1998, section 9.4.3. The notes that
follow are quick recommendations.
Scaling type 1 maintains :math:`\chi^2` distances between
... | skbio/stats/ordination/_correspondence_analysis.py | scores | JWDebelius/scikit-bio | python | def scores(self, scaling):
"Compute site and species scores for different scalings.\n\n Parameters\n ----------\n scaling : int\n\n For a more detailed explanation of the interpretation, check\n Legendre & Legendre 1998, section 9.4.3. The notes that\n follow ar... |
def testStatementResponse(self):
'Test StatementResponse'
pass | -677,246,967,601,330,200 | Test StatementResponse | test/test_statement_response.py | testStatementResponse | mxenabled/mx-platform-python | python | def testStatementResponse(self):
pass |
def AIC(N, rho, k):
'Akaike Information Criterion\n\n :param rho: rho at order k\n :param N: sample size\n :param k: AR order.\n\n If k is the AR order and N the size of the sample, then Akaike criterion is\n\n .. math:: AIC(k) = \\log(\\rho_k) + 2\\frac{k+1}{N}\n\n ::\n\n AIC(64, [0.5,0.3,... | -8,007,909,734,390,670,000 | Akaike Information Criterion
:param rho: rho at order k
:param N: sample size
:param k: AR order.
If k is the AR order and N the size of the sample, then Akaike criterion is
.. math:: AIC(k) = \log(\rho_k) + 2\frac{k+1}{N}
::
AIC(64, [0.5,0.3,0.2], [1,2,3])
:validation: double checked versus octave. | src/spectrum/criteria.py | AIC | butala/spectrum | python | def AIC(N, rho, k):
'Akaike Information Criterion\n\n :param rho: rho at order k\n :param N: sample size\n :param k: AR order.\n\n If k is the AR order and N the size of the sample, then Akaike criterion is\n\n .. math:: AIC(k) = \\log(\\rho_k) + 2\\frac{k+1}{N}\n\n ::\n\n AIC(64, [0.5,0.3,... |
def AICc(N, rho, k, norm=True):
'corrected Akaike information criterion\n\n .. math:: AICc(k) = log(\\rho_k) + 2 \\frac{k+1}{N-k-2}\n\n\n :validation: double checked versus octave.\n '
from numpy import log, array
p = k
res = (log(rho) + ((2.0 * (p + 1)) / ((N - p) - 2)))
return res | 7,617,045,710,475,825,000 | corrected Akaike information criterion
.. math:: AICc(k) = log(\rho_k) + 2 \frac{k+1}{N-k-2}
:validation: double checked versus octave. | src/spectrum/criteria.py | AICc | butala/spectrum | python | def AICc(N, rho, k, norm=True):
'corrected Akaike information criterion\n\n .. math:: AICc(k) = log(\\rho_k) + 2 \\frac{k+1}{N-k-2}\n\n\n :validation: double checked versus octave.\n '
from numpy import log, array
p = k
res = (log(rho) + ((2.0 * (p + 1)) / ((N - p) - 2)))
return res |
def KIC(N, rho, k):
'Kullback information criterion\n\n .. math:: KIC(k) = log(\\rho_k) + 3 \\frac{k+1}{N}\n\n :validation: double checked versus octave.\n '
from numpy import log, array
res = (log(rho) + ((3.0 * (k + 1.0)) / float(N)))
return res | -1,878,092,060,176,235,500 | Kullback information criterion
.. math:: KIC(k) = log(\rho_k) + 3 \frac{k+1}{N}
:validation: double checked versus octave. | src/spectrum/criteria.py | KIC | butala/spectrum | python | def KIC(N, rho, k):
'Kullback information criterion\n\n .. math:: KIC(k) = log(\\rho_k) + 3 \\frac{k+1}{N}\n\n :validation: double checked versus octave.\n '
from numpy import log, array
res = (log(rho) + ((3.0 * (k + 1.0)) / float(N)))
return res |
def AKICc(N, rho, k):
'approximate corrected Kullback information\n\n .. math:: AKICc(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}\n\n '
from numpy import log, array
p = k
res = ((log(rho) + ((p / N) / (N - p))) + (((3.0 - ((p + 2.0) / N)) * (p + 1.0)) / ((N - p) - ... | 3,753,835,065,684,638,700 | approximate corrected Kullback information
.. math:: AKICc(k) = log(rho_k) + \frac{p}{N*(N-k)} + (3-\frac{k+2}{N})*\frac{k+1}{N-k-2} | src/spectrum/criteria.py | AKICc | butala/spectrum | python | def AKICc(N, rho, k):
'approximate corrected Kullback information\n\n .. math:: AKICc(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}\n\n '
from numpy import log, array
p = k
res = ((log(rho) + ((p / N) / (N - p))) + (((3.0 - ((p + 2.0) / N)) * (p + 1.0)) / ((N - p) - ... |
def FPE(N, rho, k=None):
'Final prediction error criterion\n\n .. math:: FPE(k) = \\frac{N + k + 1}{N - k - 1} \\rho_k\n\n :validation: double checked versus octave.\n\n '
fpe = ((rho * ((N + k) + 1.0)) / ((N - k) - 1))
return fpe | 6,012,714,880,795,500,000 | Final prediction error criterion
.. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k
:validation: double checked versus octave. | src/spectrum/criteria.py | FPE | butala/spectrum | python | def FPE(N, rho, k=None):
'Final prediction error criterion\n\n .. math:: FPE(k) = \\frac{N + k + 1}{N - k - 1} \\rho_k\n\n :validation: double checked versus octave.\n\n '
fpe = ((rho * ((N + k) + 1.0)) / ((N - k) - 1))
return fpe |
def MDL(N, rho, k):
'Minimum Description Length\n\n .. math:: MDL(k) = N log \\rho_k + p \\log N\n\n :validation: results\n '
from numpy import log
mdl = ((N * log(rho)) + (k * log(N)))
return mdl | -8,109,778,770,536,789,000 | Minimum Description Length
.. math:: MDL(k) = N log \rho_k + p \log N
:validation: results | src/spectrum/criteria.py | MDL | butala/spectrum | python | def MDL(N, rho, k):
'Minimum Description Length\n\n .. math:: MDL(k) = N log \\rho_k + p \\log N\n\n :validation: results\n '
from numpy import log
mdl = ((N * log(rho)) + (k * log(N)))
return mdl |
def CAT(N, rho, k):
'Criterion Autoregressive Transfer Function :\n\n .. math:: CAT(k) = \\frac{1}{N} \\sum_{i=1}^k \\frac{1}{\\rho_i} - \\frac{\\rho_i}{\\rho_k}\n\n .. todo:: validation\n '
from numpy import zeros, arange
cat = zeros(len(rho))
for p in arange(1, (len(rho) + 1)):
rho_p... | -2,045,143,425,089,680,600 | Criterion Autoregressive Transfer Function :
.. math:: CAT(k) = \frac{1}{N} \sum_{i=1}^k \frac{1}{\rho_i} - \frac{\rho_i}{\rho_k}
.. todo:: validation | src/spectrum/criteria.py | CAT | butala/spectrum | python | def CAT(N, rho, k):
'Criterion Autoregressive Transfer Function :\n\n .. math:: CAT(k) = \\frac{1}{N} \\sum_{i=1}^k \\frac{1}{\\rho_i} - \\frac{\\rho_i}{\\rho_k}\n\n .. todo:: validation\n '
from numpy import zeros, arange
cat = zeros(len(rho))
for p in arange(1, (len(rho) + 1)):
rho_p... |
def aic_eigen(s, N):
'AIC order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n Given :math:`n` sorted eigen values :math:`\\lambda_i` with\n :m... | 7,812,365,131,708,917,000 | AIC order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
Given :math:`n` sorted eigen values :math:`\lambda_i` with
:math:`0 <= i < n`, the proposed criterion from Wax and Kailat... | src/spectrum/criteria.py | aic_eigen | butala/spectrum | python | def aic_eigen(s, N):
'AIC order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n Given :math:`n` sorted eigen values :math:`\\lambda_i` with\n :m... |
def mdl_eigen(s, N):
'MDL order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n .. math:: MDL(k) = (n-k)N \\ln \\frac{g(k)}{a(k)} + 0.5k(2n-k) log(... | -6,247,924,153,286,262,000 | MDL order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
.. math:: MDL(k) = (n-k)N \ln \frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)
.. seealso:: :func:`aic_eigen` for details
:Referen... | src/spectrum/criteria.py | mdl_eigen | butala/spectrum | python | def mdl_eigen(s, N):
'MDL order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n .. math:: MDL(k) = (n-k)N \\ln \\frac{g(k)}{a(k)} + 0.5k(2n-k) log(... |
def __init__(self, name, N):
"Create a criteria object\n\n :param name: a string or list of strings containing valid criteria\n method's name\n :param int N: size of the data sample.\n\n "
self.__name = None
self.name = name
self.__N = N
self.__rho = 0
self.__k = ... | -6,479,387,712,756,581,000 | Create a criteria object
:param name: a string or list of strings containing valid criteria
method's name
:param int N: size of the data sample. | src/spectrum/criteria.py | __init__ | butala/spectrum | python | def __init__(self, name, N):
"Create a criteria object\n\n :param name: a string or list of strings containing valid criteria\n method's name\n :param int N: size of the data sample.\n\n "
self.__name = None
self.name = name
self.__N = N
self.__rho = 0
self.__k = ... |
def __call__(self, rho=None, k=None, N=None, norm=True):
'Call the criteria function corresponding to :attr:`name`.'
self.__norm = norm
if (N is not None):
self.N = N
if (rho is not None):
self.rho = rho
if (k is not None):
self.__k = k
self.__norm = norm
f = eval(sel... | -6,886,929,687,545,804,000 | Call the criteria function corresponding to :attr:`name`. | src/spectrum/criteria.py | __call__ | butala/spectrum | python | def __call__(self, rho=None, k=None, N=None, norm=True):
self.__norm = norm
if (N is not None):
self.N = N
if (rho is not None):
self.rho = rho
if (k is not None):
self.__k = k
self.__norm = norm
f = eval(self.name)
self.data = f(self.N, self.rho, self.k)
if ... |
def stop(self, force=False):
'\n Stop seed workers by sending None-sentinel and joining the workers.\n\n :param force: Skip sending None-sentinel and join with a timeout.\n For use when workers might be shutdown already by KeyboardInterrupt.\n '
if (not force):
... | 4,690,955,062,984,198,000 | Stop seed workers by sending None-sentinel and joining the workers.
:param force: Skip sending None-sentinel and join with a timeout.
For use when workers might be shutdown already by KeyboardInterrupt. | mapproxy/seed/seeder.py | stop | GeoplexGIS/mapproxy | python | def stop(self, force=False):
'\n Stop seed workers by sending None-sentinel and joining the workers.\n\n :param force: Skip sending None-sentinel and join with a timeout.\n For use when workers might be shutdown already by KeyboardInterrupt.\n '
if (not force):
... |
@staticmethod
def can_skip(old_progress, current_progress):
"\n Return True if the `current_progress` is behind the `old_progress` -\n when it isn't as far as the old progress.\n\n >>> SeedProgress.can_skip(None, [(0, 4)])\n False\n >>> SeedProgress.can_skip([], [(0, 4)])\n ... | 1,151,653,813,984,509,800 | Return True if the `current_progress` is behind the `old_progress` -
when it isn't as far as the old progress.
>>> SeedProgress.can_skip(None, [(0, 4)])
False
>>> SeedProgress.can_skip([], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], None)
False
>>> SeedProgress.can_skip([(0, 4)], [(0, 4)])
False
>>> SeedProgres... | mapproxy/seed/seeder.py | can_skip | GeoplexGIS/mapproxy | python | @staticmethod
def can_skip(old_progress, current_progress):
"\n Return True if the `current_progress` is behind the `old_progress` -\n when it isn't as far as the old progress.\n\n >>> SeedProgress.can_skip(None, [(0, 4)])\n False\n >>> SeedProgress.can_skip([], [(0, 4)])\n ... |
def _walk(self, cur_bbox, levels, current_level=0, all_subtiles=False):
'\n :param cur_bbox: the bbox to seed in this call\n :param levels: list of levels to seed\n :param all_subtiles: seed all subtiles and do not check for\n intersections with bbox/geom\n '
... | 7,811,925,727,793,627,000 | :param cur_bbox: the bbox to seed in this call
:param levels: list of levels to seed
:param all_subtiles: seed all subtiles and do not check for
intersections with bbox/geom | mapproxy/seed/seeder.py | _walk | GeoplexGIS/mapproxy | python | def _walk(self, cur_bbox, levels, current_level=0, all_subtiles=False):
'\n :param cur_bbox: the bbox to seed in this call\n :param levels: list of levels to seed\n :param all_subtiles: seed all subtiles and do not check for\n intersections with bbox/geom\n '
... |
def _filter_subtiles(self, subtiles, all_subtiles):
'\n Return an iterator with all sub tiles.\n Yields (None, None, None) for non-intersecting tiles,\n otherwise (subtile, subtile_bbox, intersection).\n '
for subtile in subtiles:
if (subtile is None):
(yield (Non... | -8,990,287,347,691,366,000 | Return an iterator with all sub tiles.
Yields (None, None, None) for non-intersecting tiles,
otherwise (subtile, subtile_bbox, intersection). | mapproxy/seed/seeder.py | _filter_subtiles | GeoplexGIS/mapproxy | python | def _filter_subtiles(self, subtiles, all_subtiles):
'\n Return an iterator with all sub tiles.\n Yields (None, None, None) for non-intersecting tiles,\n otherwise (subtile, subtile_bbox, intersection).\n '
for subtile in subtiles:
if (subtile is None):
(yield (Non... |
def testVnicEthAdapterPolicyList(self):
'Test VnicEthAdapterPolicyList'
pass | -3,592,398,631,652,934,700 | Test VnicEthAdapterPolicyList | test/test_vnic_eth_adapter_policy_list.py | testVnicEthAdapterPolicyList | CiscoUcs/intersight-python | python | def testVnicEthAdapterPolicyList(self):
pass |
def timestamp():
'Get a precise timestamp'
return _clock_func() | -8,491,789,232,951,088,000 | Get a precise timestamp | auto1/venv/Lib/site-packages/pywinauto/timings.py | timestamp | snakyhuman/auto-tests | python | def timestamp():
return _clock_func() |
def always_wait_until(timeout, retry_interval, value=True, op=operator.eq):
'Decorator to call wait_until(...) every time for a decorated function/method'
def wait_until_decorator(func):
'Callable object that must be returned by the @always_wait_until decorator'
@wraps(func)
def wrappe... | 1,975,085,348,732,868,400 | Decorator to call wait_until(...) every time for a decorated function/method | auto1/venv/Lib/site-packages/pywinauto/timings.py | always_wait_until | snakyhuman/auto-tests | python | def always_wait_until(timeout, retry_interval, value=True, op=operator.eq):
def wait_until_decorator(func):
'Callable object that must be returned by the @always_wait_until decorator'
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-c... |
def wait_until(timeout, retry_interval, func, value=True, op=operator.eq, *args, **kwargs):
'\n Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n *... | 8,987,653,373,818,958,000 | Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **value** the value to be compared against (defaults to True)
* **op** ... | auto1/venv/Lib/site-packages/pywinauto/timings.py | wait_until | snakyhuman/auto-tests | python | def wait_until(timeout, retry_interval, func, value=True, op=operator.eq, *args, **kwargs):
'\n Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n *... |
def always_wait_until_passes(timeout, retry_interval, exceptions=Exception):
'Decorator to call wait_until_passes(...) every time for a decorated function/method'
def wait_until_passes_decorator(func):
'Callable object that must be returned by the @always_wait_until_passes decorator'
@wraps(fu... | 4,447,544,560,799,387,000 | Decorator to call wait_until_passes(...) every time for a decorated function/method | auto1/venv/Lib/site-packages/pywinauto/timings.py | always_wait_until_passes | snakyhuman/auto-tests | python | def always_wait_until_passes(timeout, retry_interval, exceptions=Exception):
def wait_until_passes_decorator(func):
'Callable object that must be returned by the @always_wait_until_passes decorator'
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function ... |
def wait_until_passes(timeout, retry_interval, func, exceptions=Exception, *args, **kwargs):
'\n Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n * **func** ... | -3,044,909,421,197,461,000 | Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **exceptions** list of exceptions to test against (default: Exception)
* **args** ... | auto1/venv/Lib/site-packages/pywinauto/timings.py | wait_until_passes | snakyhuman/auto-tests | python | def wait_until_passes(timeout, retry_interval, func, exceptions=Exception, *args, **kwargs):
'\n Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n * **func** ... |
def __getattribute__(self, attr):
'Get the value for a particular timing'
if (attr in ['__dict__', '__members__', '__methods__', '__class__']):
return object.__getattribute__(self, attr)
if (attr in dir(TimeConfig)):
return object.__getattribute__(self, attr)
if (attr in self.__default_t... | 5,758,114,577,032,665,000 | Get the value for a particular timing | auto1/venv/Lib/site-packages/pywinauto/timings.py | __getattribute__ | snakyhuman/auto-tests | python | def __getattribute__(self, attr):
if (attr in ['__dict__', '__members__', '__methods__', '__class__']):
return object.__getattribute__(self, attr)
if (attr in dir(TimeConfig)):
return object.__getattribute__(self, attr)
if (attr in self.__default_timing):
return self._timings[at... |
def __setattr__(self, attr, value):
'Set a particular timing'
if (attr == '_timings'):
object.__setattr__(self, attr, value)
elif (attr in self.__default_timing):
self._timings[attr] = value
else:
raise AttributeError('Unknown timing setting: {0}'.format(attr)) | 6,694,533,967,756,782,000 | Set a particular timing | auto1/venv/Lib/site-packages/pywinauto/timings.py | __setattr__ | snakyhuman/auto-tests | python | def __setattr__(self, attr, value):
if (attr == '_timings'):
object.__setattr__(self, attr, value)
elif (attr in self.__default_timing):
self._timings[attr] = value
else:
raise AttributeError('Unknown timing setting: {0}'.format(attr)) |
def Fast(self):
'Set fast timing values\n\n Currently this changes the timing in the following ways:\n timeouts = 1 second\n waits = 0 seconds\n retries = .001 seconds (minimum!)\n\n (if existing times are faster then keep existing times)\n '
for setting in self.__defau... | -8,164,920,440,071,308,000 | Set fast timing values
Currently this changes the timing in the following ways:
timeouts = 1 second
waits = 0 seconds
retries = .001 seconds (minimum!)
(if existing times are faster then keep existing times) | auto1/venv/Lib/site-packages/pywinauto/timings.py | Fast | snakyhuman/auto-tests | python | def Fast(self):
'Set fast timing values\n\n Currently this changes the timing in the following ways:\n timeouts = 1 second\n waits = 0 seconds\n retries = .001 seconds (minimum!)\n\n (if existing times are faster then keep existing times)\n '
for setting in self.__defau... |
def Slow(self):
'Set slow timing values\n\n Currently this changes the timing in the following ways:\n timeouts = default timeouts * 10\n waits = default waits * 3\n retries = default retries * 3\n\n (if existing times are slower then keep existing times)\n '
for settin... | -2,657,467,507,185,467,000 | Set slow timing values
Currently this changes the timing in the following ways:
timeouts = default timeouts * 10
waits = default waits * 3
retries = default retries * 3
(if existing times are slower then keep existing times) | auto1/venv/Lib/site-packages/pywinauto/timings.py | Slow | snakyhuman/auto-tests | python | def Slow(self):
'Set slow timing values\n\n Currently this changes the timing in the following ways:\n timeouts = default timeouts * 10\n waits = default waits * 3\n retries = default retries * 3\n\n (if existing times are slower then keep existing times)\n '
for settin... |
def Defaults(self):
'Set all timings to the default time'
self._timings = self.__default_timing.copy() | 2,807,953,678,227,924,500 | Set all timings to the default time | auto1/venv/Lib/site-packages/pywinauto/timings.py | Defaults | snakyhuman/auto-tests | python | def Defaults(self):
self._timings = self.__default_timing.copy() |
def wait_until_decorator(func):
'Callable object that must be returned by the @always_wait_until decorator'
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-callback'
return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs)
retu... | 2,517,495,585,548,496,000 | Callable object that must be returned by the @always_wait_until decorator | auto1/venv/Lib/site-packages/pywinauto/timings.py | wait_until_decorator | snakyhuman/auto-tests | python | def wait_until_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-callback'
return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs)
return wrapper |
def wait_until_passes_decorator(func):
'Callable object that must be returned by the @always_wait_until_passes decorator'
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-callback'
return wait_until_passes(timeout, retry_interval, func, exceptions, *ar... | -9,218,570,426,858,793,000 | Callable object that must be returned by the @always_wait_until_passes decorator | auto1/venv/Lib/site-packages/pywinauto/timings.py | wait_until_passes_decorator | snakyhuman/auto-tests | python | def wait_until_passes_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-callback'
return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs)
return wrapper |
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-callback'
return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs) | -3,897,649,944,343,418,000 | pre-callback, target function call and post-callback | auto1/venv/Lib/site-packages/pywinauto/timings.py | wrapper | snakyhuman/auto-tests | python | @wraps(func)
def wrapper(*args, **kwargs):
return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs) |
@wraps(func)
def wrapper(*args, **kwargs):
'pre-callback, target function call and post-callback'
return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs) | -1,675,487,642,385,341,200 | pre-callback, target function call and post-callback | auto1/venv/Lib/site-packages/pywinauto/timings.py | wrapper | snakyhuman/auto-tests | python | @wraps(func)
def wrapper(*args, **kwargs):
return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs) |
@pytest.fixture(autouse=True)
def mpl_test_settings(qt_module, mpl_test_settings):
'\n Ensure qt_module fixture is *first* fixture.\n\n We override the `mpl_test_settings` fixture and depend on the `qt_module`\n fixture first. It is very important that it is first, because it skips\n tests when Qt is no... | -4,220,199,978,093,792,000 | Ensure qt_module fixture is *first* fixture.
We override the `mpl_test_settings` fixture and depend on the `qt_module`
fixture first. It is very important that it is first, because it skips
tests when Qt is not available, and if not, then the main
`mpl_test_settings` fixture will try to switch backends before the skip... | tests/test_backend_qt.py | mpl_test_settings | qiujiangkun/mplopengl | python | @pytest.fixture(autouse=True)
def mpl_test_settings(qt_module, mpl_test_settings):
'\n Ensure qt_module fixture is *first* fixture.\n\n We override the `mpl_test_settings` fixture and depend on the `qt_module`\n fixture first. It is very important that it is first, because it skips\n tests when Qt is no... |
@pytest.mark.parametrize('qt_key, qt_mods, answer', [('Key_A', ['ShiftModifier'], 'A'), ('Key_A', [], 'a'), ('Key_A', ['ControlModifier'], 'ctrl+a'), ('Key_Aacute', ['ShiftModifier'], 'Á'), ('Key_Aacute', [], 'á'), ('ControlKey', ['AltModifier'], 'alt+control'), ('AltKey', ['ControlModifier'], 'ctrl+alt'), ('Key_Aacute... | 6,353,541,293,830,967,000 | Make a figure
Send a key_press_event event (using non-public, qtX backend specific api)
Catch the event
Assert sent and caught keys are the same | tests/test_backend_qt.py | test_correct_key | qiujiangkun/mplopengl | python | @pytest.mark.parametrize('qt_key, qt_mods, answer', [('Key_A', ['ShiftModifier'], 'A'), ('Key_A', [], 'a'), ('Key_A', ['ControlModifier'], 'ctrl+a'), ('Key_Aacute', ['ShiftModifier'], 'Á'), ('Key_Aacute', [], 'á'), ('ControlKey', ['AltModifier'], 'alt+control'), ('AltKey', ['ControlModifier'], 'ctrl+alt'), ('Key_Aacute... |
@pytest.mark.backend('Qt5Agg')
def test_dpi_ratio_change():
'\n Make sure that if _dpi_ratio changes, the figure dpi changes but the\n widget remains the same physical size.\n '
prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio'
with mock.patch(prop, new_callable=mock.PropertyMock) ... | -8,263,528,494,087,251,000 | Make sure that if _dpi_ratio changes, the figure dpi changes but the
widget remains the same physical size. | tests/test_backend_qt.py | test_dpi_ratio_change | qiujiangkun/mplopengl | python | @pytest.mark.backend('Qt5Agg')
def test_dpi_ratio_change():
'\n Make sure that if _dpi_ratio changes, the figure dpi changes but the\n widget remains the same physical size.\n '
prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio'
with mock.patch(prop, new_callable=mock.PropertyMock) ... |
def __init__(self, max_depth=10):
'\n Initializes attributes and checks the maximum depth provided.\n\n Parameters\n ----------\n max_depth : int\n The maximum depth to look in.\n '
if (max_depth < 1):
raise Exception('max_depth must be greater than or equal... | -1,162,637,250,902,903,000 | Initializes attributes and checks the maximum depth provided.
Parameters
----------
max_depth : int
The maximum depth to look in. | src/indexing/indexer.py | __init__ | pgecsenyi/piepy | python | def __init__(self, max_depth=10):
'\n Initializes attributes and checks the maximum depth provided.\n\n Parameters\n ----------\n max_depth : int\n The maximum depth to look in.\n '
if (max_depth < 1):
raise Exception('max_depth must be greater than or equal... |
def add_rule(self, directory, policy):
'\n Registers a new directory to index. Does nothing if the given directory is already added.\n\n Parameters\n ----------\n directory : str\n The directory to be indexed.\n policy : IndexerPolicy\n A policy that applies ... | -6,603,331,465,312,089,000 | Registers a new directory to index. Does nothing if the given directory is already added.
Parameters
----------
directory : str
The directory to be indexed.
policy : IndexerPolicy
A policy that applies to this directory. | src/indexing/indexer.py | add_rule | pgecsenyi/piepy | python | def add_rule(self, directory, policy):
'\n Registers a new directory to index. Does nothing if the given directory is already added.\n\n Parameters\n ----------\n directory : str\n The directory to be indexed.\n policy : IndexerPolicy\n A policy that applies ... |
def index(self):
'\n Initializes filters, initiates indexing and after the indexing process has finished, cleans filters.\n '
for analyzer in self._analyzers:
analyzer.init_filters()
for (directory, analyzer_store) in self._rules.items():
if os.path.exists(directory):
... | 3,595,322,224,351,321,000 | Initializes filters, initiates indexing and after the indexing process has finished, cleans filters. | src/indexing/indexer.py | index | pgecsenyi/piepy | python | def index(self):
'\n \n '
for analyzer in self._analyzers:
analyzer.init_filters()
for (directory, analyzer_store) in self._rules.items():
if os.path.exists(directory):
self._scan_directory(directory, analyzer_store)
for analyzer in self._analyzers:
anal... |
def _enter(self, directory):
'\n Indicates for the analyzers that we entered into the given directory.\n\n Parameters\n ----------\n directory : str\n The directory we entered.\n '
for analyzer in self._analyzers:
analyzer.enter(directory)
self._current_... | 5,542,440,193,222,605,000 | Indicates for the analyzers that we entered into the given directory.
Parameters
----------
directory : str
The directory we entered. | src/indexing/indexer.py | _enter | pgecsenyi/piepy | python | def _enter(self, directory):
'\n Indicates for the analyzers that we entered into the given directory.\n\n Parameters\n ----------\n directory : str\n The directory we entered.\n '
for analyzer in self._analyzers:
analyzer.enter(directory)
self._current_... |
def _leave(self):
'\n Indicates for the analyzers that we are leaving the last directory.\n '
for analyzer in self._analyzers:
analyzer.leave()
self._current_depth = (self._current_depth - 1) | -5,838,119,474,221,757,000 | Indicates for the analyzers that we are leaving the last directory. | src/indexing/indexer.py | _leave | pgecsenyi/piepy | python | def _leave(self):
'\n \n '
for analyzer in self._analyzers:
analyzer.leave()
self._current_depth = (self._current_depth - 1) |
def _scan_directory(self, path, analyzer_store):
'\n Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to\n analyze and store the data.\n\n Parameters\n ----------\n path : str\n The path to enumerate.\n analyz... | -7,786,856,763,598,390,000 | Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to
analyze and store the data.
Parameters
----------
path : str
The path to enumerate.
analyzers : PathAnalyzerStore
The PathAnalyzerStore to use. | src/indexing/indexer.py | _scan_directory | pgecsenyi/piepy | python | def _scan_directory(self, path, analyzer_store):
'\n Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to\n analyze and store the data.\n\n Parameters\n ----------\n path : str\n The path to enumerate.\n analyz... |
@property
def local_epoch(self) -> int:
"\n This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will\n automatically re-synchronize by downloading state from another peer.\n An epoch corresponds to accumulating target_batch_size across all act... | -610,526,349,330,534,100 | This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will
automatically re-synchronize by downloading state from another peer.
An epoch corresponds to accumulating target_batch_size across all active devices. | hivemind/optim/optimizer.py | local_epoch | MeshchaninovViacheslav/hivemind | python | @property
def local_epoch(self) -> int:
"\n This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will\n automatically re-synchronize by downloading state from another peer.\n An epoch corresponds to accumulating target_batch_size across all act... |
def step(self, closure: Optional[Callable[([], torch.Tensor)]]=None, batch_size: Optional[int]=None, grad_scaler: Optional[GradScaler]=None):
'\n Update training progress after accumulating another local batch size. Depending on the configuration, this will\n report progress to peers, run global or lo... | 7,173,218,560,361,957,000 | Update training progress after accumulating another local batch size. Depending on the configuration, this will
report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.
:param closure: A closure that reevaluates the model and returns the loss.
:param batch_size: op... | hivemind/optim/optimizer.py | step | MeshchaninovViacheslav/hivemind | python | def step(self, closure: Optional[Callable[([], torch.Tensor)]]=None, batch_size: Optional[int]=None, grad_scaler: Optional[GradScaler]=None):
'\n Update training progress after accumulating another local batch size. Depending on the configuration, this will\n report progress to peers, run global or lo... |
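A hedged usage sketch for the step() call described in this row, following hivemind's documented training-loop pattern. The surrounding setup (model, data_loader, and opt, a hivemind.Optimizer) is assumed, not taken from this dataset; only the step(batch_size=...) call reflects the signature shown above.

# Usage sketch; model/data_loader/opt are assumed to exist.
for batch in data_loader:
    loss = model(batch).mean()           # placeholder loss computation
    loss.backward()
    opt.step(batch_size=len(batch))      # report progress; may trigger averaging
    opt.zero_grad()                      # skip if reuse_grad_buffers=True (see zero_grad row below)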
def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None:
'Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step'
assert (self._schema_hash == self._compute_schema_hash()), 'parameters or gradients changed during iteration'
_epoch_start_time = ... | -2,510,677,156,005,104,600 | Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step | hivemind/optim/optimizer.py | _update_global_epoch | MeshchaninovViacheslav/hivemind | python | def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None:
assert (self._schema_hash == self._compute_schema_hash()), 'parameters or gradients changed during iteration'
_epoch_start_time = time.perf_counter()
with self.tracker.pause_updates():
wait_for_trigger = None
if ... |
def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool:
'Begin an all-reduce round to average gradients; return True if succeeded, False if failed'
if (grad_scaler is not None):
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
began_avera... | -3,321,637,960,115,376,600 | Begin an all-reduce round to average gradients; return True if succeeded, False if failed | hivemind/optim/optimizer.py | _begin_averaging_gradients | MeshchaninovViacheslav/hivemind | python | def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool:
if (grad_scaler is not None):
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
began_averaging_gradients = False
if ((self.scheduled_grads is not None) and (self.scheduled_grads... |
def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool:
'Check if gradients are valid, accumulate and return True; otherwise, reset and return False'
assert ((not self.use_local_updates) and (not self.auxiliary))
if ((grad_scaler is not None) and (not grad_scal... | -3,769,334,914,421,445,000 | Check if gradients are valid, accumulate and return True; otherwise, reset and return False | hivemind/optim/optimizer.py | _check_and_accumulate_gradients | MeshchaninovViacheslav/hivemind | python | def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool:
assert ((not self.use_local_updates) and (not self.auxiliary))
if ((grad_scaler is not None) and (not grad_scaler.are_grads_finite(self))):
logger.log(self.status_loglevel, 'Encountered incorrect ... |
def _maybe_schedule_gradient_averaging(self) -> None:
'If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch'
assert self.use_gradient_averaging
if ((self.tracker.estimated_next_update_time - get_dht_time()) <= self.matchmaking_time):
if ((self.schedu... | -7,469,730,981,344,683,000 | If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch | hivemind/optim/optimizer.py | _maybe_schedule_gradient_averaging | MeshchaninovViacheslav/hivemind | python | def _maybe_schedule_gradient_averaging(self) -> None:
assert self.use_gradient_averaging
if ((self.tracker.estimated_next_update_time - get_dht_time()) <= self.matchmaking_time):
if ((self.scheduled_grads is None) or self.scheduled_grads.triggered or self.scheduled_grads.done()):
eta_se... |
def _maybe_schedule_state_averaging(self) -> None:
'If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start'
next_epoch = max((self.local_epoch + 1), self.tracker.global_epoch)
if ((next_epoch % self.average_state_every) != 0):
return
if self.state_... | 4,311,566,516,886,041,600 | If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start | hivemind/optim/optimizer.py | _maybe_schedule_state_averaging | MeshchaninovViacheslav/hivemind | python | def _maybe_schedule_state_averaging(self) -> None:
next_epoch = max((self.local_epoch + 1), self.tracker.global_epoch)
if ((next_epoch % self.average_state_every) != 0):
return
if self.state_averager.averaging_in_progress:
return
if (self.delay_before_state_averaging.num_updates == ... |
def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]):
'Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients'
assert ((self.use_gradient_averaging and (maybe_step_control is None)) or maybe_step_control.triggered)
ave... | 8,258,748,080,675,111,000 | Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients | hivemind/optim/optimizer.py | _average_gradients_and_load_into_optimizer | MeshchaninovViacheslav/hivemind | python | def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]):
assert ((self.use_gradient_averaging and (maybe_step_control is None)) or maybe_step_control.triggered)
averaged_gradients = False
try:
if (maybe_step_control is not None):
group_info = ... |
def _load_averaged_gradients_into_optimizer_(self):
'If required, load averaged gradients into optimizer; otherwise simply notify grad averager'
assert self.use_gradient_averaging
if self.offload_optimizer:
pass
else:
optimized_param_groups = self.state_averager.optimizer.param_groups
... | -1,556,350,358,990,898,400 | If required, load averaged gradients into optimizer; otherwise simply notify grad averager | hivemind/optim/optimizer.py | _load_averaged_gradients_into_optimizer_ | MeshchaninovViacheslav/hivemind | python | def _load_averaged_gradients_into_optimizer_(self):
assert self.use_gradient_averaging
if self.offload_optimizer:
pass
else:
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group['... |
def _load_local_gradients_into_optimizer(self):
'Fallback to using local gradients in the optimizer (instead of averaged gradients)'
logger.log(self.status_loglevel, f'Proceeding with local gradients')
self.grad_averager.load_accumulators_into_averager_()
self._load_averaged_gradients_into_optimizer_() | -2,611,091,218,565,431,300 | Fallback to using local gradients in the optimizer (instead of averaged gradients) | hivemind/optim/optimizer.py | _load_local_gradients_into_optimizer | MeshchaninovViacheslav/hivemind | python | def _load_local_gradients_into_optimizer(self):
logger.log(self.status_loglevel, f'Proceeding with local gradients')
self.grad_averager.load_accumulators_into_averager_()
self._load_averaged_gradients_into_optimizer_() |
def zero_grad(self, set_to_none: bool=False):
'Reset gradients from model. If reuse_grad_buffers=True, this will raise an error.'
if (self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers):
raise ValueError(f'When running {self.__class__.__name__} with reuse_grad_buffers=True, user shoul... | 5,026,697,121,043,981,000 | Reset gradients from model. If reuse_grad_buffers=True, this will raise an error. | hivemind/optim/optimizer.py | zero_grad | MeshchaninovViacheslav/hivemind | python | def zero_grad(self, set_to_none: bool=False):
if (self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers):
raise ValueError(f'When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never call zero_grad manually. Gradients will be refreshed internally')
for p... |
def _should_load_state_from_peers(self) -> bool:
'\n If true, peer will discard local progress and attempt to download state from peers.\n This method allows peer to continue training in two cases:\n - peer is on the same epoch as other collaborators - keep training normally\n - peer w... | -4,292,192,955,225,625,600 | If true, peer will discard local progress and attempt to download state from peers.
This method allows peer to continue training in two cases:
- peer is on the same epoch as other collaborators - keep training normally
- peer was on the same epoch and accumulated some grads, but some collaborators
have just tran... | hivemind/optim/optimizer.py | _should_load_state_from_peers | MeshchaninovViacheslav/hivemind | python | def _should_load_state_from_peers(self) -> bool:
'\n If true, peer will discard local progress and attempt to download state from peers.\n This method allows peer to continue training in two cases:\n - peer is on the same epoch as other collaborators - keep training normally\n - peer w... |
def is_synchronized_with_peers(self) -> bool:
'Checks whether the current peer is up-to-date with others in terms of the epoch (step) number.'
return (self.local_epoch >= (self.tracker.global_epoch - 1)) | 8,538,689,893,884,570,000 | Checks whether the current peer is up-to-date with others in terms of the epoch (step) number. | hivemind/optim/optimizer.py | is_synchronized_with_peers | MeshchaninovViacheslav/hivemind | python | def is_synchronized_with_peers(self) -> bool:
return (self.local_epoch >= (self.tracker.global_epoch - 1)) |
def load_state_from_peers(self, **kwargs):
'\n Attempt to load the newest collaboration state from other peers within the same run_id.\n\n If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.\n '
if ((self.scheduled_grads is not None)... | -3,688,123,011,394,910,000 | Attempt to load the newest collaboration state from other peers within the same run_id.
If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place. | hivemind/optim/optimizer.py | load_state_from_peers | MeshchaninovViacheslav/hivemind | python | def load_state_from_peers(self, **kwargs):
'\n Attempt to load the newest collaboration state from other peers within the same run_id.\n\n If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.\n '
if ((self.scheduled_grads is not None)... |
def _tag_along_with_zero_weight(self, control: StepControl):
'Wait for a running averaging round to finish with zero weight.'
if (not control.triggered):
control.weight = 0
control.allow_allreduce()
if (not control.done()):
try:
control.result(self.averaging_timeout)
... | 6,806,298,440,358,946,000 | Wait for a running averaging round to finish with zero weight. | hivemind/optim/optimizer.py | _tag_along_with_zero_weight | MeshchaninovViacheslav/hivemind | python | def _tag_along_with_zero_weight(self, control: StepControl):
if (not control.triggered):
control.weight = 0
control.allow_allreduce()
if (not control.done()):
try:
control.result(self.averaging_timeout)
except BaseException as e:
logger.exception(e)
... |
def visualize(model: Model, structural_part=True, measurement_part=False, view=True, filename=None, title=''):
'Visualization of SEM model via graphviz library.\n\n Keyword arguments:\n model -- A SEM model.\n structural_part -- Should structural part be visualised?\n measurement_part -- Sho... | 6,237,103,513,191,551,000 | Visualization of SEM model via graphviz library.
Keyword arguments:
model -- A SEM model.
structural_part -- Should structural part be visualised?
measurement_part -- Should measurement part be visualised?
view -- Should graph be displayed?
filename -- Filename/path.
title --... | semopy/visualization.py | visualize | YoungjuneKwon/forked-semopy | python | def visualize(model: Model, structural_part=True, measurement_part=False, view=True, filename=None, title=''):
'Visualization of SEM model via graphviz library.\n\n Keyword arguments:\n model -- A SEM model.\n structural_part -- Should structural part be visualised?\n measurement_part -- Shoul... |
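The visualize() row renders a SEM model via graphviz. Here is a minimal, self-contained sketch of that rendering approach; the node names and edges are invented for illustration, whereas semopy's real function derives them from the model's structural part.

# Illustrative graphviz sketch; edges are made up.
from graphviz import Digraph

g = Digraph(comment='Hypothetical SEM structural part')
g.edge('x1', 'eta1')   # observed -> latent (invented)
g.edge('eta1', 'y1')   # latent -> observed (invented)
g.render('sem_example', view=False)  # writes sem_example and a rendered PDF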
def __len__(self):
'Returns the length of the coding, use len(my_coding).'
return len(self._data) | 423,562,225,722,425,860 | Returns the length of the coding, use len(my_coding). | Bio/Nexus/StandardData.py | __len__ | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def __len__(self):
return len(self._data) |
def raw(self):
'Returns the full coding as a python list.'
return self._data | 6,180,843,810,050,607,000 | Returns the full coding as a python list. | Bio/Nexus/StandardData.py | raw | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def raw(self):
return self._data |
def __str__(self):
'Returns the full coding as a python string, use str(my_coding).'
str_return = ''
for coding in self._data:
if (coding['t'] == 'multi'):
str_return += (('(' + ''.join(coding['d'])) + ')')
elif (coding['t'] == 'uncer'):
str_return += (('{' + ''.join(... | -8,687,247,177,936,933,000 | Returns the full coding as a python string, use str(my_coding). | Bio/Nexus/StandardData.py | __str__ | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def __str__(self):
str_return = ''
for coding in self._data:
if (coding['t'] == 'multi'):
str_return += (('(' + ''.join(coding['d'])) + ')')
elif (coding['t'] == 'uncer'):
str_return += (('{' + ''.join(coding['d'])) + '}')
else:
str_return += coding['d'... |
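Given the branching shown in the __str__ row, a quick illustration of how a coding list renders: 'multi' entries wrap in parentheses, 'uncer' entries in braces, and anything else is emitted as-is. The input data is invented.

# Run of the formatting logic above on an invented coding list.
coding = [
    {'t': 'std', 'd': 'A'},
    {'t': 'multi', 'd': ['A', 'B']},
    {'t': 'uncer', 'd': ['C', 'D']},
]
out = ''
for c in coding:
    if c['t'] == 'multi':
        out += '(' + ''.join(c['d']) + ')'
    elif c['t'] == 'uncer':
        out += '{' + ''.join(c['d']) + '}'
    else:
        out += c['d']
print(out)  # -> A(AB){CD}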
def next(self):
'Deprecated Python 2 style alias for Python 3 style __next__ method.'
return self.__next__() | -8,216,947,362,407,348,000 | Deprecated Python 2 style alias for Python 3 style __next__ method. | Bio/Nexus/StandardData.py | next | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def next(self):
return self.__next__() |
def display(wa):
'Display all the stuffs on the screen'
print('Total word count: {}'.format(len(wa.words_list(wa.normalized_text))))
print('Number of different words: {}'.format(len(wa.differents_words_list(wa.normalized_text))))
print('Total number of characters: {}'.format(len(wa.normal_text)))
pr... | -2,072,599,764,001,801,500 | Display all the stuffs on the screen | words.py | display | Layto888/Words-Analysis | python | def display(wa):
print('Total word count: {}'.format(len(wa.words_list(wa.normalized_text))))
print('Number of different words: {}'.format(len(wa.differents_words_list(wa.normalized_text))))
print('Total number of characters: {}'.format(len(wa.normal_text)))
print('Number of characters without spac... |
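The counts printed by display() reduce to a few one-liners; a hedged sketch follows, with a toy sentence standing in for the analyzed text and a deliberately simplified normalization (the real normalize_text may differ).

# Sketch of the counts display() prints above.
text = 'The quick brown fox jumps over the lazy dog. The end.'
words = text.lower().replace('.', '').split()
print('Total word count:', len(words))                 # 11
print('Number of different words:', len(set(words)))   # 9
print('Total number of characters:', len(text))
print('Characters without spaces:', len(text.replace(' ', '')))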
def lexical_density(words_list, lexi_file_name):
" calculates the lexical density.\n L_d = (N_lex / N) * 100\n Where:\n\n L_d = the analyzed text's lexical density\n\n N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)\n in the analyzed text.\n\n N = the number of all token... | 1,663,756,399,478,105,900 | calculates the lexical density.
L_d = (N_lex / N) * 100
Where:
L_d = the analyzed text's lexical density
N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)
in the analyzed text.
N = the number of all tokens (total number of words) in the analyzed text. | words.py | lexical_density | Layto888/Words-Analysis | python | def lexical_density(words_list, lexi_file_name):
" calculates the lexical density.\n L_d = (N_lex / N) * 100\n Where:\n\n L_d = the analyzed text's lexical density\n\n N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)\n in the analyzed text.\n\n N = the number of all token... |
def deduce_language(words_list, lexi_file_name):
'\n This function will deduce language between French and English.\n Using the lexical words found on the text.\n '
with open(lexi_file_name, 'r', encoding=DEFAULT_CODEC) as fp:
lexical_words = fp.read()
lexical_words = lexical_words.split(',... | -2,685,445,105,616,604,000 | This function will deduce language between French and English.
Using the lexical words found on the text. | words.py | deduce_language | Layto888/Words-Analysis | python | def deduce_language(words_list, lexi_file_name):
'\n This function will deduce language between French and English.\n Using the lexical words found on the text.\n '
with open(lexi_file_name, 'r', encoding=DEFAULT_CODEC) as fp:
lexical_words = fp.read()
lexical_words = lexical_words.split(',... |
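The deduce_language row decides between French and English by counting hits against a lexical-word file. A self-contained sketch of that idea follows; two tiny inline stopword sets stand in for the comma-separated file the real function loads from lexi_file_name.

# Sketch of French-vs-English deduction; word sets are stand-ins.
ENGLISH = {'the', 'and', 'of', 'to', 'is'}
FRENCH = {'le', 'la', 'et', 'de', 'est'}

def deduce_language(words):
    en = sum(1 for w in words if w in ENGLISH)
    fr = sum(1 for w in words if w in FRENCH)
    return 'English' if en >= fr else 'French'

print(deduce_language(['le', 'chat', 'est', 'noir']))  # -> French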
def show_process_time(t1_start, t1_stop, t2_start, t2_stop):
'\n function to show elapsed time.\n '
print('\n')
print('Elapsed time: {0:.4f} [sec]'.format((t1_stop - t1_start)))
print('CPU process time: {0:.4f} [sec]'.format((t2_stop - t2_start)))
print('Done.') | -4,703,739,508,452,082,000 | function to show elapsed time. | words.py | show_process_time | Layto888/Words-Analysis | python | def show_process_time(t1_start, t1_stop, t2_start, t2_stop):
'\n \n '
print('\n')
print('Elapsed time: {0:.4f} [sec]'.format((t1_stop - t1_start)))
print('CPU process time: {0:.4f} [sec]'.format((t2_stop - t2_start)))
print('Done.') |
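A usage sketch for show_process_time() above, contrasting the two clocks it compares: time.perf_counter() measures wall-clock time, while time.process_time() counts only CPU time of the current process.

# Usage sketch; show_process_time is the function defined in the row above.
import time

t1_start, t2_start = time.perf_counter(), time.process_time()
sum(i * i for i in range(10 ** 6))  # some work to time
t1_stop, t2_stop = time.perf_counter(), time.process_time()
show_process_time(t1_start, t1_stop, t2_start, t2_stop)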
def __init__(self, filename):
'\n Input : text file name\n Do some operations to a text and return results.\n '
with open(filename, 'r', encoding=DEFAULT_CODEC) as fp:
self.normal_text = fp.read().strip()
self.normalized_text = self.normalize_text(self.normal_text) | 135,094,893,725,222,100 | Input : text file name
Do some operations to a text and return results. | words.py | __init__ | Layto888/Words-Analysis | python | def __init__(self, filename):
'\n Input : text file name\n Do some operations to a text and return results.\n '
with open(filename, 'r', encoding=DEFAULT_CODEC) as fp:
self.normal_text = fp.read().strip()
self.normalized_text = self.normalize_text(self.normal_text) |
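The constructor above reads the file and derives normalized_text via normalize_text(). A hedged guess at what that normalization might do (lowercasing plus punctuation stripping); the actual implementation in words.py may differ.

# Hypothetical stand-in for normalize_text(); illustration only.
import string

def normalize_text(text):
    table = str.maketrans('', '', string.punctuation)
    return text.lower().translate(table)

print(normalize_text('Hello, World!'))  # -> hello world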