max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/group_lasso/utils.py | sroet/group-lasso | 88 | 12771151 | <gh_stars>10-100
"""
"""
import numpy as np
def extract_ohe_groups(onehot_encoder):
    """Extract a vector with group indices from a scikit-learn OneHotEncoder

    Each input feature of the encoder becomes one group; the returned vector
    assigns every one-hot output column the index of the feature it encodes.

    Arguments
    ---------
    onehot_encoder : sklearn.preprocessing.OneHotEncoder
        Must already be fitted (i.e. have a ``categories_`` attribute).

    Returns
    -------
    np.ndarray
        A group-vector that can be used with the group lasso regularised
        linear models.

    Raises
    ------
    ValueError
        If the encoder has not been fitted yet.
    """
    if not hasattr(onehot_encoder, "categories_"):
        raise ValueError(
            "Cannot extract group labels from an unfitted OneHotEncoder instance."
        )

    categories = onehot_encoder.categories_
    # Use np.ones(len(...)) rather than np.ones_like(category): ones_like
    # inherits the category dtype, which fails for string/object categories.
    return np.concatenate(
        [
            group * np.ones(len(category))
            for group, category in enumerate(categories)
        ]
    )
| 2.8125 | 3 |
024 Binary Search/Binary_Search.py | Iftakharpy/AlgoExpert-Questions | 3 | 12771152 | <gh_stars>1-10
# Best
# time O(log(n))
# space O(1)
def binarySearch(array, target):
    """Iterative binary search over a sorted array.

    Returns the index of ``target`` if present, otherwise -1.
    Time O(log n), space O(1).
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        value = array[middle]
        if value == target:
            return middle
        if value < target:
            lo = middle + 1
        else:
            hi = middle - 1
    return -1
# Good
# time O(log(n)) - n is len(array)
# space O(log(n)) - here n is max call stack size
def binarySearchHelper(array, target, low, high):
    """Recursive binary search of ``array[low..high]`` (inclusive) for ``target``.

    Returns the index of ``target`` or -1 if it is not in the window.
    Time O(log n); space O(log n) due to the recursion depth.
    """
    # Exhausted-window check must come FIRST: the original checked
    # array[mid] before low > high, which crashed on empty arrays
    # (mid = -1 indexes from the end) and could read stale positions.
    if low > high:
        return -1
    mid = (low + high) // 2
    if array[mid] == target:
        return mid
    # Recurse into whichever half can still contain the target.
    if target > array[mid]:
        low = mid + 1
    else:
        high = mid - 1
    return binarySearchHelper(array, target, low, high)
def binarySearch(array, target):
    """Entry point for the recursive binary search.

    Note: this redefinition shadows the iterative ``binarySearch`` above.
    Returns the index of ``target`` or -1 if absent.
    """
    last_index = len(array) - 1
    return binarySearchHelper(array, target, 0, last_index)
| 3.484375 | 3 |
src/notesdir/cli.py | dendronhq/notesdir | 2 | 12771153 | <reponame>dendronhq/notesdir
"""Command-line interface for notesdir."""
import argparse
import dataclasses
from datetime import datetime
import json
from operator import itemgetter, attrgetter
import os.path
import sys
from terminaltables import AsciiTable
from notesdir.api import Notesdir
from notesdir.models import FileInfoReq, FileInfo
def _print_file_info(info: FileInfo, fields: FileInfoReq) -> None:
    """Print the fields of ``info`` selected by ``fields``, one per line.

    Links print as ``href`` or ``href -> referent`` when the referent
    resolves; backlinks print their referrer paths.
    """
    if fields.path:
        print(f'path: {info.path}')
    if fields.title:
        print(f'title: {info.title}')
    if fields.created:
        print(f'created: {info.created}')
    if fields.tags:
        print(f'tags: {", ".join(sorted(info.tags))}')
    if fields.links:
        print('links:')
        for link in info.links:
            referent = link.referent()
            if referent:
                print(f'\t{link.href} -> {referent}')
            else:
                print(f'\t{link.href}')
    if fields.backlinks:
        print('backlinks:')
        for backlink in info.backlinks:
            print(f'\t{backlink.referrer}')
def _info(args, nd: Notesdir) -> int:
    """Handle the ``info`` subcommand: show metadata for a single file."""
    if args.fields:
        fields = FileInfoReq.parse(args.fields[0])
    else:
        fields = FileInfoReq.full()
    info = nd.repo.info(args.path[0], fields)
    if args.json:
        print(json.dumps(info.as_json()))
        return 0
    _print_file_info(info, fields)
    return 0
def _new(args, nd: Notesdir) -> int:
    """Handle the ``new`` subcommand: create a file from a template."""
    created_path = nd.new(args.template[0], args.dest)
    if args.preview:
        return 0
    print(f'Created {created_path}')
    return 0
def _change(args, nd: Notesdir) -> int:
    """Handle the ``change`` subcommand: update metadata of the given files."""
    def _tagset(raw):
        # Comma-separated list -> lowercased set, blanks dropped.
        return {t.strip() for t in (raw or [''])[0].lower().split(',') if t.strip()}

    new_title = args.title[0] if args.title else None
    new_created = datetime.fromisoformat(args.created[0]) if args.created else None
    nd.change(set(args.paths),
              add_tags=_tagset(args.add_tags),
              del_tags=_tagset(args.del_tags),
              title=new_title,
              created=new_created)
    return 0
def _mv(args, nd: Notesdir) -> int:
    """Handle the ``mv`` subcommand: move a file/folder, updating links."""
    src, dest = args.src[0], args.dest[0]
    moves = nd.move({src: dest})
    if args.json:
        print(json.dumps(moves))
        return 0
    # Only report when something ended up somewhere other than the exact
    # requested mapping, and we're not in preview mode.
    if moves == {src: dest} or args.preview:
        return 0
    for old_path, new_path in moves.items():
        print(f'Moved {old_path} to {new_path}')
    return 0
def _organize(args, nd: Notesdir) -> int:
    """Handle the ``organize`` subcommand: reorganize files per configuration."""
    moves = nd.organize()
    if args.json:
        serializable = {str(old): str(new) for old, new in moves.items()}
        print(json.dumps(serializable))
    elif moves and not args.preview:
        for old_path, new_path in moves.items():
            print(f'Moved {old_path} to {new_path}')
    return 0
def _backfill(args, nd: Notesdir) -> int:
changed, errors = nd.backfill()
if not args.preview:
for path in changed:
print(f'Updated {changed}')
for error in errors:
print(repr(error), file=sys.stderr)
return 0
def _tags(args, nd: Notesdir) -> int:
query = args.query or ''
counts = nd.repo.tag_counts(query)
if args.json:
print(json.dumps(counts))
else:
tags = sorted(counts.keys())
data = [('Tag', 'Count')] + [(t, counts[t]) for t in tags]
table = AsciiTable(data)
table.justify_columns[2] = 'right'
print(table.table)
return 0
def _relink(args, nd: Notesdir) -> int:
    """Handle the ``relink`` subcommand: repoint all links from old to new."""
    old_path, new_path = args.old[0], args.new[0]
    nd.replace_path_hrefs(old_path, new_path)
    return 0
def _query(args, nd: Notesdir) -> int:
    """Handle the ``query`` subcommand.

    Runs the query against the repo, keeping only results that are regular
    files, then renders them in one of three formats: JSON (sorted by path),
    an ASCII table (sorted by the first selected column), or a plain
    per-file dump via _print_file_info.
    """
    query = args.query or ''
    infos = [i for i in nd.repo.query(query) if os.path.isfile(i.path)]
    if args.fields:
        fields = FileInfoReq.parse(args.fields[0])
    else:
        # Default field set for query output (narrower than FileInfoReq.full()).
        fields = FileInfoReq(path=True, tags=True, title=True, created=True)
    if args.json:
        infos.sort(key=attrgetter('path'))
        print(json.dumps([i.as_json() for i in infos]))
    elif args.table:
        # TODO make sorting / path resolution consistent with json output
        # Build one tuple per file; columns are appended in the same fixed
        # order used for the heading below, so they always line up.
        data = []
        for info in infos:
            row = ()
            if fields.path:
                row += (os.path.basename(info.path),)
            if fields.title:
                row += (info.title or '',)
            if fields.created:
                row += (info.created.strftime('%Y-%m-%d') if info.created else '',)
            if fields.tags:
                row += ('\n'.join(sorted(info.tags)),)
            if fields.links:
                row += ('\n'.join(sorted({os.path.relpath(link.referent()) for link in info.links if link.referent()})),)
            if fields.backlinks:
                row += ('\n'.join(sorted({os.path.relpath(link.referrer) for link in info.backlinks})),)
            data.append(row)
        # Sort by whatever column happens to be first in the selection.
        data.sort(key=itemgetter(0))
        heading = ()
        if fields.path:
            heading += ('Filename',)
        if fields.title:
            heading += ('Title',)
        if fields.created:
            heading += ('Created',)
        if fields.tags:
            heading += ('Tags',)
        if fields.links:
            heading += ('Link paths',)
        if fields.backlinks:
            heading += ('Backlink paths',)
        data.insert(0, heading)
        table = AsciiTable(data)
        print(table.table)
    else:
        for info in infos:
            print('--------------------')
            _print_file_info(info, fields)
    return 0
def argparser() -> argparse.ArgumentParser:
    """Build the argparse CLI: one subcommand per handler function.

    Each subparser sets ``func`` to its handler; main() dispatches on it.
    ``preview`` defaults to False so handlers can read it unconditionally.
    """
    fields_help = f'Possible fields are: {", ".join(f.name for f in dataclasses.fields(FileInfoReq))}.'
    parser = argparse.ArgumentParser()
    parser.set_defaults(func=None, preview=False)
    subs = parser.add_subparsers(title='Commands')

    # info: metadata for a single file
    p_i = subs.add_parser('info', help='Show info about a file, such as metadata and links/backlinks.')
    p_i.add_argument('-f', '--fields', nargs=1,
                     help=f'Comma-separated list of fields to show. {fields_help} By default, all fields are shown.')
    p_i.add_argument('-j', '--json', action='store_true', help='Output as JSON.')
    p_i.add_argument('path', nargs=1)
    p_i.set_defaults(func=_info)

    # query: search across files
    p_q = subs.add_parser(
        'query',
        help='Query for files. For full query syntax, see the documentation of '
             'notesdir.models.FileQuery.parse - an example query is "tag:foo sort:title,-created".')
    p_q.add_argument('query', nargs='?', help='Query string. If omitted, the query matches all files.')
    p_q.add_argument('-f', '--fields', nargs=1,
                     help=f'Comma-separated list of fields to show. {fields_help} Not all fields are shown by default.')
    p_q_formats = p_q.add_mutually_exclusive_group()
    p_q_formats.add_argument('-j', '--json', help='Output as JSON.', action='store_true')
    p_q_formats.add_argument('-t', '--table', help='Format output as a table.', action='store_true')
    p_q.set_defaults(func=_query)

    # new: create a file from a Mako template
    p_c = subs.add_parser('new',
                          help='Create new file from a Mako template. You can either specify the path to the template, '
                               'or just give its name without file extensions if it is listed in "templates" in '
                               'your ~/notesdir.conf.py file. '
                               'This command will print the path of the newly created file.')
    p_c.add_argument('template', nargs=1, help='Name or path of template.')
    p_c.add_argument('dest', nargs='?',
                     help='Suggested destination filename. This may be overridden by the template, or adjusted '
                          'if it conflicts with an existing file. A filename will be selected for you if omitted.')
    p_c.add_argument('-p', '--preview', action='store_true', help='Print plan but do not create file')
    p_c.set_defaults(func=_new)

    # change: edit metadata in place
    p_change = subs.add_parser('change', help='Update metadata of the specified files.')
    p_change.add_argument('paths', nargs='+', help='Files to update.')
    p_change.add_argument('-a', '--add-tags', nargs=1,
                          help='Comma-separated list of tags to add (if not already present).')
    p_change.add_argument('-d', '--del-tags', nargs=1,
                          help='Comma-separated list of tags to remove (if present).')
    p_change.add_argument('-t', '--title', nargs=1, help='New title for files')
    p_change.add_argument('-c', '--created', nargs=1, help='New created datetime for files, in ISO8601 format')
    p_change.add_argument('-p', '--preview', action='store_true',
                          help='Print changes to be made but do not change files')
    p_change.set_defaults(func=_change)

    # mv: move a file, rewriting links
    p_mv = subs.add_parser(
        'mv',
        help='Move a file. Any links to the file from other files in your configured notes directories will be '
             'updated to point to the new location, provided the referrers are of supported file types. '
             'Relative links from this file to other files will also be updated, if this file is of a supported file '
             'type.')
    p_mv.add_argument('src', help='File or folder to move.', nargs=1)
    p_mv.add_argument('dest', nargs=1,
                      help='New file path or new parent folder. If the argument is a folder, notesdir will try to '
                           'keep the original filename. In either case, this command will not overwrite an existing '
                           'file; it will adjust the new filename if needed to be unique within the target directory.')
    p_mv.add_argument('-j', '--json', action='store_true',
                      help='Output as JSON. The output is an object whose keys are the paths of files that were '
                           'moved, and whose values are the new paths of those files.')
    p_mv.add_argument('-p', '--preview',
                      action='store_true', help='Print changes to be made but do not move or change files')
    p_mv.set_defaults(func=_mv)

    # organize: bulk-move per conf.path_organizer
    p_org = subs.add_parser(
        'organize',
        help='Organize files. All files within the directories configured in conf.repo_conf.root_paths will be '
             'passed to the function defined in conf.path_organizer, and will be moved if it returns a new path. '
             'New folders will be created when necessary and empty folders will be deleted. As with the mv command, '
             'relative links between files will be updated, if the file type of the referrer is supported.')
    p_org.add_argument('-j', '--json', action='store_true',
                       help='Output as JSON. The output is an object whose keys are the paths of files that were '
                            'moved, and whose values are the new paths of those files.')
    p_org.add_argument('-p', '--preview', action='store_true',
                       help='Print changes to be made but do not move or change files')
    p_org.set_defaults(func=_organize)

    # backfill: fill in missing title/created metadata
    p_backfill = subs.add_parser(
        'backfill',
        help='Backfill missing metadata. All files within the directories configured in conf.repo_conf.root_paths '
             'will be checked for title and created date metadata. If the title is missing, a title is set based '
             'on the filename; if created is missing, it is set based on the file\'s birthtime or ctime. '
             'Errors will be printed but will not result in a nonzero return status, since it is expected that '
             'some files in your notes directories will not be supported by notesdir.')
    p_backfill.add_argument('-p', '--preview', action='store_true',
                            help='Print changes to be made but do not change files')
    p_backfill.set_defaults(func=_backfill)

    # tags: tag usage counts
    p_tags_count = subs.add_parser(
        'tags',
        help='Show a list of tags and the number of files that have each tag.')
    p_tags_count.add_argument('query', nargs='?',
                              help='Query to filter files by. If omitted, data for all files is shown. The query '
                                   'format is the same as for the `query` command.')
    p_tags_count.add_argument('-j', '--json', action='store_true',
                              help='Output as JSON. The output is an object whose keys are tags and whose values '
                                   'are the number of notes that matched the query and also possess that tag.')
    p_tags_count.set_defaults(func=_tags)

    # relink: rewrite links without moving files
    p_relink = subs.add_parser(
        'relink',
        help='Replace all links to one file with links to another. Note that this does not '
             'currently replace links to children of the original path - e.g., if the '
             'old path is "/foo/bar", a link to "/foo/bar/baz" will not be updated. '
             'No files are moved, and this command does not care whether or not the old '
             'or new paths refer to actual files.')
    p_relink.add_argument('old', nargs=1)
    p_relink.add_argument('new', nargs=1)
    p_relink.add_argument('-p', '--preview', action='store_true',
                          help='Print changes to be made but do not change files')
    p_relink.set_defaults(func=_relink)

    return parser
def main(args=None) -> int:
    """Runs the tool and returns its exit code.

    args may be an array of string command-line arguments; if absent,
    the process's arguments are used.
    """
    parser = argparser()
    parsed = parser.parse_args(args)
    if parsed.func is None:
        # No subcommand given: show usage and fail.
        parser.print_help()
        return 1
    with Notesdir.for_user() as nd:
        if parsed.preview:
            nd.repo.conf.preview_mode = True
        return parsed.func(parsed, nd)
| 2.53125 | 3 |
woollylib/utils/utils.py | woolly-of-cv/woolly-lib | 0 | 12771154 | <filename>woollylib/utils/utils.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchsummary import summary
torch.manual_seed(1)
def get_device() -> tuple:
    """Detect the available compute device.

    Returns:
        tuple: (use_cuda, device) where use_cuda is a bool and device is
        the corresponding torch.device ("cuda" or "cpu").
    """
    cuda_available = torch.cuda.is_available()
    device_name = "cuda" if cuda_available else "cpu"
    return (cuda_available, torch.device(device_name))
def print_summary(model, input_size=(1, 28, 28)):
    """Print Model summary via torchsummary.

    Args:
        model (Net): Model Instance
        input_size (tuple, optional): Input size as (channels, height, width).
            Defaults to (1, 28, 28).
    """
    summary(model, input_size=input_size)
def print_modal_summary(model):
    """Print a table of trainable parameter shapes and counts for a model.

    Args:
        model (Net): Model Instance
    """
    divider = '----------------------------------------------------------------'
    print(divider)
    print(f'| {"Name":25}\t{"Shape":20}\tParams |')
    print(divider)
    total = 0
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        count = np.prod(list(param.data.shape))
        total += count
        # Strip the trailing attribute (e.g. ".weight") to get the layer name.
        parts = name.split('.')
        layer_name = ".".join(parts[:len(parts) - 1])
        print(f'| {layer_name:25}\t{str(list(param.data.shape)):20}\t{count:6} |')
    print(divider)
    print(f'| {"Total":25}\t{"":20}\t{total:6} |')
    print(divider)
def initialize_weights(m):
    """Initialize a layer's weights in place (intended for model.apply).

    Conv2d layers get Xavier-uniform weights and zero bias; BatchNorm2d
    layers get unit weight and zero bias. Other layer types are untouched.

    Args:
        m (nn.Module): Layer instance
    """
    if isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight.data, 1)
        nn.init.constant_(m.bias.data, 0)
    elif isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            nn.init.constant_(m.bias.data, 0)
def load_weights_from_path(model, path):
    """Load a saved state dict into ``model`` in place.

    Args:
        model (Net): Model instance
        path (str): Path to weights file

    Returns:
        Net: the same model, now carrying the loaded weights
    """
    state = torch.load(path)
    model.load_state_dict(state)
    return model
def get_all_predictions(model, loader, device):
    """Run the model over a whole loader and collect predictions and targets.

    Args:
        model (Net): Trained Model
        loader (Dataloader): instance of dataloader
        device (str): Which device to use cuda/cpu

    Returns:
        tuple: (all_preds, all_targets) as 1-D tensors on ``device``.
    """
    model.eval()
    pred_batches = []
    target_batches = []
    with torch.no_grad():
        for data, target in loader:
            batch, labels = data.to(device), target.to(device)
            target_batches.append(labels)
            pred_batches.append(model(batch).argmax(dim=1))
    # Seed the concatenation with an empty float tensor so the result dtype
    # matches the original incremental-cat implementation (float).
    empty = torch.tensor([]).to(device)
    all_preds = torch.cat([empty] + pred_batches, dim=0)
    all_targets = torch.cat([empty] + target_batches, dim=0)
    return all_preds, all_targets
def get_incorrrect_predictions(model, loader, device):
    """Collect every misclassified sample from a loader.

    (Function name kept as-is, typo included, for backward compatibility.)

    Args:
        model (Net): Trained model
        loader (DataLoader): instance of data loader
        device (str): Which device to use cuda/cpu

    Returns:
        list: per-mistake entries [input, target, prediction, score of the
        predicted class], all moved to CPU.
    """
    model.eval()
    incorrect = []
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Removed an unused `loss = F.nll_loss(...)` computation that
            # was discarded on every batch.
            pred = output.argmax(dim=1)
            for d, t, p, o in zip(data, target, pred, output):
                if not p.eq(t.view_as(p)).item():
                    incorrect.append(
                        [d.cpu(), t.cpu(), p.cpu(), o[p.item()].cpu()])
    return incorrect
def prepare_confusion_matrix(all_preds, all_targets, class_map):
    """Build a confusion matrix from flat prediction/target tensors.

    Args:
        all_preds (list): List of all predictions
        all_targets (list): List of all actual labels
        class_map (dict): Class names (its length fixes the matrix size)

    Returns:
        tensor: confusion matrix of shape (num_classes, num_classes);
        rows index true labels, columns index predicted labels.
    """
    no_classes = len(class_map)
    confusion_matrix = torch.zeros(no_classes, no_classes, dtype=torch.int64)
    # Pair each target with its prediction: one (true, predicted) row per sample.
    pairs = torch.stack((all_targets, all_preds), dim=1).type(torch.int64)
    for pair in pairs:
        true_label, predicted_label = pair.tolist()
        confusion_matrix[true_label, predicted_label] += 1
    return confusion_matrix
| 2.46875 | 2 |
rx/operators/observable/blocking/last.py | yutiansut/RxPY | 1 | 12771155 | from typing import Any
def last(self) -> Any:
    """Blocks until the last element emits from a BlockingObservable.

    If no item is emitted when on_completed() is called, an exception is thrown

    Note: This will block even if the underlying Observable is
    asynchronous.

    Returns the last item to be emitted from a BlockingObservable
    """
    _missing = object()  # sentinel distinguishing "no items" from None items
    result = _missing
    for item in self.to_iterable():
        result = item
    if result is _missing:
        raise Exception("No items were emitted")
    return result
def last_or_default(self, default_value: Any) -> Any:
    """Blocks until the last element emits from a BlockingObservable.

    If no item is emitted when on_completed() is called, the provided
    default_value will be returned

    Note: This will block even if the underlying Observable is
    asynchronous.

    Keyword arguments:
    default_value -- Value to return if no value has been emitted.

    Returns the last item to be emitted from a BlockingObservable
    """
    # Start at the default; any emitted item simply overwrites it.
    result = default_value
    for item in self.to_iterable():
        result = item
    return result
| 3.53125 | 4 |
arch/arch_spade_flex.py | kimhyeongbok/SDGAN | 0 | 12771156 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 14:54:12 2018
@author: maximov
"""
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
from arch.base_network import BaseNetwork
from arch.normalization import get_nonspade_norm_layer
from arch.architecture import ResnetBlock as ResnetBlock
from arch.architecture import SPADEResnetBlock as SPADEResnetBlock
# from base_network import BaseNetwork
# from normalization import get_nonspade_norm_layer
# from architecture import ResnetBlock as ResnetBlock
# from architecture import SPADEResnetBlock as SPADEResnetBlock
class Args():
    # Minimal stand-in for the SPADE option namespace that SPADEResnetBlock
    # and get_nonspade_norm_layer expect (normally parsed from the CLI).
    num_upsampling_layers = 'normal'  # SPADE upsampling preset
    ngf = 64  # base generator filter count
    norm_G = 'spectralspadesyncbatch3x3'  # generator normalization spec
    semantic_nc = 11  # semantic label channels
    ndf = 64  # base discriminator filter count
    output_nc = 3  # RGB output channels
    label_nc = 11  # number of label classes
    no_instance = True  # no instance-boundary map channel
# main architecture. use concatenation
class Generator(nn.Module):
def __init__(self, input_nc=11, num_classes=1200, encode_one_hot = True, img_size=128, **kwargs):
super(Generator, self).__init__()
self.in_dim = input_nc
self.encode_one_hot = encode_one_hot
self.img_size = img_size
opt =Args()
# align the back ground with semantic
self.align_bg_conv = nn.Conv2d(3, input_nc, 3, padding=1)
input_ch = input_nc
# follow SPADE ResNet
if img_size==128:
self.conv0 = SPADEResnetBlock(input_ch, 32, opt)
input_ch = 32
self.conv1 = SPADEResnetBlock(input_ch, 64, opt)
self.conv2 = SPADEResnetBlock(64, 128, opt)
self.conv3 = SPADEResnetBlock(128,256, opt)
self.conv4 = SPADEResnetBlock(256, 256, opt)
self.res1 = ResidualBlock(256)
self.res2 = ResidualBlock(256)
self.res3 = ResidualBlock(256)
self.res4 = ResidualBlock(256)
# embed onehot with image
self.embed = nn.Sequential(
ConvLayer(512, 256, kernel_size=3, stride=1),
nn.InstanceNorm2d(256, affine=True),
)
self.up = nn.Upsample(scale_factor=2)
self.deconv4 = SPADEResnetBlock(256, 256, opt)
self.deconv3 = SPADEResnetBlock(256, 128, opt)
self.deconv2 = SPADEResnetBlock(128, 64, opt)
self.deconv1 = SPADEResnetBlock(64, 32, opt)
if img_size == 128:
self.deconv0 = SPADEResnetBlock(32, 16, opt)
self.conv_end = nn.Sequential(nn.Conv2d(16, 3, kernel_size=3, stride=1, padding=1),)
self.flag_onehot = encode_one_hot
if encode_one_hot:
self.encode_one_hot = nn.Sequential(
nn.Linear(num_classes, 256), nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 256), nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 256), nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 256), nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 512), nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 1024), nn.LeakyReLU(0.2, inplace=True),
nn.Linear(1024, 2048), nn.LeakyReLU(0.2, inplace=True),
#nn.LeakyReLU(0.2, inplace=True),
)
self.encode_noise = nn.Sequential(
ConvLayer(32, 64, kernel_size=3, stride=1),
nn.LeakyReLU(0.2, inplace=True),
nn.InstanceNorm2d(64, affine=True),
ConvLayer(64, 128, kernel_size=3, stride=1),
nn.LeakyReLU(0.2, inplace=True),
nn.InstanceNorm2d(128, affine=True),
ConvLayer(128, 256, kernel_size=3, stride=1),
nn.LeakyReLU(0.2, inplace=True),
nn.InstanceNorm2d(256, affine=True),
)
else:
self.encode_one_hot = None
def convblock(self, in_ch,out_ch, krn_sz = 3):
block = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=krn_sz, stride=1, padding=int(krn_sz/2)),
#nn.BatchNorm2d(out_ch),
nn.LeakyReLU(0.2, inplace=True),
)
return block
def forward(self, seg, bg, onehot=None, high_res=0):
# step 1: encode bg to align semantic
# step 2: encode semantic?
# step 3: extract latent with bg and semantic
# step 4: upsampling
bg = self.align_bg_conv(bg)
# Encode
if self.img_size==128:
out = self.conv0(bg, seg)
# print(out.size(), seg.size())
out = self.conv1(out, seg) # [B, 64, 32, 32]
out = F.avg_pool2d(out, 2)
# seg = F.avg_pool2d(seg, 2)
# print(out.size(), seg.size())
out = self.conv2(out, seg) # [B, 128, 16, 16]
out = F.avg_pool2d(out, 2)
# seg = F.avg_pool2d(seg, 2)
# print(out.size(), seg.size())
out = self.conv3(out, seg) # [B, 256, 8, 8]
out = F.avg_pool2d(out, 2)
# seg = F.avg_pool2d(seg, 2)
# print(out.size(), seg.size())
out = self.conv4(out, seg) # [B, 256, 4, 4]
out = F.avg_pool2d(out, 2)
# seg = F.avg_pool2d(seg, 2)
# print(out.size(), seg.size())
# Embedding
if onehot is not None and self.flag_onehot:
noise = self.encode_one_hot(onehot)
noise = noise.view(-1, 32, 8, 8)
noise = self.encode_noise(noise)
# print(noise.size(), out.size())
out = torch.cat((out, noise), 1)
out = self.embed(out)
# Residual layers
out = self.res1(out)
out = self.res2(out)
out = self.res3(out)
out = self.res4(out)
# Decode
out = self.up(out)
out = self.deconv4(out,seg) # [B, 256, 8, 8]
out = self.up(out)
out = self.deconv3(out,seg) # [B, 128, 16, 16]
out = self.up(out)
out = self.deconv2(out,seg) # [B, 64, 32, 32]
out = self.deconv1(out,seg) # [B, 32, 64, 64]
# print(out.size())
if self.img_size==128:
out = self.deconv0(out, seg)
out = self.up(out)
# print(out.size())
# print(self.img_size, out.size())
out = self.conv_end(out) # [B, 3, 64, 64]
#out = torch.sigmoid(out)
# print(out.size())
return out
class Discriminator(nn.Module):
    """Discriminator: a stack of downsampling residual blocks followed by
    two dense layers producing a single realness score per image.
    """
    def __init__(self, input_nc=3, num_classes=1200, img_size=64, **kwargs):
        super(Discriminator, self).__init__()
        self.img_size = img_size
        # Each ResidualBlockDown halves the spatial resolution.
        self.conv1 = ResidualBlockDown(input_nc, 64)
        self.conv2 = ResidualBlockDown(64, 128)
        self.conv3 = ResidualBlockDown(128, 256)
        self.conv4 = ResidualBlockDown(256, 512)
        if img_size == 128:
            # Extra downsampling stage for the larger input size.
            self.conv5 = ResidualBlockDown(512, 512)
        self.dense0 = nn.Linear(8192, 1024)
        self.dense1 = nn.Linear(1024, 1)

    def forward(self, x, high_res=0):
        features = self.conv1(x)
        features = self.conv2(features)
        features = self.conv3(features)
        features = self.conv4(features)
        if self.img_size == 128:
            features = self.conv5(features)
        flat = features.view(features.size(0), -1)
        hidden = F.leaky_relu(self.dense0(flat), 0.2, inplace=True)
        return F.leaky_relu(self.dense1(hidden), 0.2, inplace=True)
# region Residual Blocks
class ResidualBlockDown(nn.Module):
    """Residual block that halves spatial resolution (avg-pool by 2)."""
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=None):
        super(ResidualBlockDown, self).__init__()
        # Main (right) path: two spectral-norm convolutions.
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride, padding)
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride, padding)
        # Shortcut (left) path: 1x1 projection to match channel counts.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x):
        shortcut = F.avg_pool2d(self.conv_l(x), 2)
        main = self.conv_r1(F.relu(x))
        main = F.avg_pool2d(self.conv_r2(F.relu(main)), 2)
        return shortcut + main
class ResidualBlockUp(nn.Module):
    """Residual block that multiplies spatial resolution by ``upsample``."""
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, upsample=2):
        super(ResidualBlockUp, self).__init__()
        self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')
        # Main (right) path: IN -> ReLU -> upsample -> conv -> IN -> ReLU -> conv.
        self.norm_r1 = nn.InstanceNorm2d(in_channels, affine=True)
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride)
        self.norm_r2 = nn.InstanceNorm2d(out_channels, affine=True)
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride)
        # Shortcut (left) path: upsample then 1x1 projection.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x):
        shortcut = self.conv_l(self.upsample(x))
        main = F.relu(self.norm_r1(x))
        main = self.conv_r1(self.upsample(main))
        main = self.conv_r2(F.relu(self.norm_r2(main)))
        return shortcut + main
class ResidualBlock(nn.Module):
    """Channel-preserving residual block: two conv+InstanceNorm layers with
    an identity skip connection."""
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = nn.InstanceNorm2d(channels, affine=True)

    def forward(self, x):
        h = F.relu(self.in1(self.conv1(x)))
        h = self.in2(self.conv2(h))
        return h + x
class ConvLayer(nn.Module):
    """Reflection-padded Conv2d wrapped in spectral normalization.

    When ``padding`` is None, "same"-style padding of kernel_size // 2
    is used (exactly preserves spatial size for stride 1, odd kernels).
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=None):
        super(ConvLayer, self).__init__()
        pad = kernel_size // 2 if padding is None else padding
        self.reflection_pad = nn.ReflectionPad2d(pad)
        self.conv2d = nn.utils.spectral_norm(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride))

    def forward(self, x):
        return self.conv2d(self.reflection_pad(x))
# endregion
# if __name__ == '__main__':
# gnet = Generator()
# semantic_input = torch.ones(( 2, 11, 128, 128 ))
# bg_input = torch.ones(( 2,3,128,128 ))
# onehot_input = torch.zeros((2, 1200))
# output = gnet(semantic_input, bg_input, onehot_input)
| 2.234375 | 2 |
lambda/src/compiler/handler.py | jack-e-tabaska/BayerCLAW | 7 | 12771157 | import logging
from pkg.compiler import compile_template
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event: dict, context: object) -> dict:
    """CloudFormation macro entry point: compile the template fragment.

    Expects ``event`` shaped like ``{"requestId": ..., "fragment": {...}}``.
    Returns a copy of ``event`` with ``status`` set to "success"/"failure",
    the compiled ``fragment`` on success, and ``errorMessage`` on failure.
    """
    # event = {
    #   "requestId": "1234567890",
    #   "fragment": {...}
    # }
    ret = event.copy()
    try:
        ret["fragment"] = compile_template(event["fragment"])
        ret["status"] = "success"
    except Exception as e:
        # https://stackoverflow.com/questions/55190232/aws-cloudformation-transform-how-do-i-properly-return-an-error-message
        logger.exception("failed: ")
        ret["status"] = "failure"
        ret["errorMessage"] = str(e)
    # Fixed: previously this was `finally: return ret`, which also swallowed
    # BaseExceptions (e.g. SystemExit/KeyboardInterrupt) raised in the try
    # block (flake8 B012). A plain return after the try/except is equivalent
    # for the handled cases without that hazard.
    return ret
| 2.203125 | 2 |
shop/shop/context_processors.py | Anych/mila-iris | 0 | 12771158 | <filename>shop/shop/context_processors.py
from carts.utils import _cart_id
from category.models import Category
from carts.models import Cart, CartItem
def menu_links(request):
    """Context processor exposing descendants of the three top-level
    product categories (clothes, shoes, accessories) to all templates."""
    def _descendants(slug):
        return Category.objects.get(slug=slug).get_descendants(include_self=False)

    return dict(cloth_categories=_descendants('clothes'),
                shoe_categories=_descendants('shoes'),
                accessor_categories=_descendants('accessories'))
def counter(request):
    """Context processor exposing the total cart quantity as ``cart_count``.

    Admin pages get an empty context to avoid unnecessary cart lookups.
    """
    cart_count = 0
    if 'admin' in request.path:
        return {}
    else:
        try:
            # NOTE(review): ``filter`` returns a (possibly empty) queryset and
            # never raises Cart.DoesNotExist, so the except clause below seems
            # unreachable via this line — confirm what it is meant to catch.
            cart = Cart.objects.filter(cart_id=_cart_id(request))
            if request.user.is_authenticated:
                # Authenticated users: items are keyed by user email.
                cart_items = CartItem.objects.all().filter(user__email=request.user.email)
            else:
                # Anonymous users: items are keyed by the session cart.
                cart_items = CartItem.objects.all().filter(cart=cart[:1])
            for cart_item in cart_items:
                cart_count += cart_item.quantity
        except Cart.DoesNotExist:
            cart_count = 0
        return dict(cart_count=cart_count)
loadtestmodel.py | The-bot-makers/FireFighterEngine | 1 | 12771159 | import tensorflow as tf
import numpy as np
import chess
#load the saved model
model=tf.keras.models.load_model('openlock_model')
#rest explained in nntest.py
probmodel=tf.keras.Sequential([
model,
tf.keras.layers.Softmax()
])
# Piece symbol -> digit code ('.' is an empty square).
PieceNum={'p':'0','n':'1','b':'2','r':'3','q':'4','k':'5','.':'6'}

def numreprgen(repres):
    """Convert a 64-character board string into an 8x8 grid of floats.

    Each square maps through PieceNum (case-insensitive) and is scaled
    into [0, 1] by dividing by 6.
    """
    grid = []
    for row_start in range(0, 64, 8):
        row = repres[row_start:row_start + 8]
        grid.append([int(PieceNum[square.lower()]) / 6.0 for square in row])
    return grid
def reprgener(fen):
    """Render the piece placement of a FEN string as a flat 64-char string.

    Squares are emitted in python-chess index order (0..63); empty squares
    become '.', occupied squares their piece symbol.
    """
    brd = chess.Board()
    brd.set_fen(fen)
    bb = chess.BaseBoard()
    bb.set_board_fen(brd.board_fen())
    pcmap = bb.piece_map()
    squares = [pcmap[i].symbol() if i in pcmap else '.' for i in range(64)]
    return ''.join(squares)
# Smoke test: run one position through the model and print the
# most probable move index plus the full probability vector.
testfen='r4r1k/p5p1/1pRq2np/5p2/7P/P4BP1/1P2QP2/2K1R3 b - - 0 1'
probs=probmodel(np.array([numreprgen(reprgener(testfen))]))
print(np.argmax(probs))
print(probs)
src/apim/apim_update.py | paul-mateos/azure-apim-deployment-utils | 14 | 12771160 | import sys
import apim_commands
# Require exactly one argument: the configuration directory.
# (Python 2 script: note the print statements.)
if len(sys.argv) < 2:
    print "Usage:"
    print " python apim_update.py <config dir>"
    sys.exit(1)

# Run the update; exit non-zero on failure so callers/CI can detect it.
if not apim_commands.apim_update(sys.argv[1]):
    sys.exit(1)

sys.exit(0)
| 2.4375 | 2 |
seahub/base/registration_urls.py | evrimguner/seahub | 1 | 12771161 | <gh_stars>1-10
from django.conf.urls.defaults import *
# from django.views.generic.simple import direct_to_template
from django.views.generic import TemplateView
from django.conf import settings
from registration.views import activate
from registration.views import register
from seahub.base.accounts import RegistrationForm, DetailedRegistrationForm
from seahub.base.generic import DirectTemplateView
# Choose the signup form: the detailed variant when the deployment requires
# extra profile fields at registration time.
form_class = DetailedRegistrationForm if settings.REQUIRE_DETAIL_ON_REGISTRATION \
    else RegistrationForm

# Keyword arguments passed to django-registration's register view.
reg_dict = { 'backend': 'seahub.base.accounts.RegistrationBackend',
             'form_class': form_class,
             }

# When accounts are active immediately, send users straight to the site root.
if settings.ACTIVATE_AFTER_REGISTRATION == True:
    reg_dict['success_url'] = settings.SITE_ROOT
# Base registration URLs: activation flow plus django-registration's
# bundled auth URLs (legacy Django `patterns` API).
urlpatterns = patterns('',
                       url(r'^activate/complete/$',
                           TemplateView.as_view(template_name='registration/activation_complete.html'),
                           name='registration_activation_complete'),
                       # Activation keys get matched by \w+ instead of the more specific
                       # [a-fA-F0-9]{40} because a bad activation key should still get to the view;
                       # that way it can return a sensible "invalid key" message instead of a
                       # confusing 404.
                       url(r'^activate/(?P<activation_key>\w+)/$',
                           activate,
                           { 'backend': 'seahub.base.accounts.RegistrationBackend', },
                           name='registration_activate'),
                       (r'', include('registration.auth_urls')),
                       )
# CLOUD_MODE is optional in settings; default to False when absent.
try:
    from seahub.settings import CLOUD_MODE
except ImportError:
    CLOUD_MODE = False
from seahub.settings import ENABLE_SIGNUP

# Only expose the signup URLs when self-registration is enabled.
if ENABLE_SIGNUP:
    urlpatterns += patterns('',
                            url(r'^register/$',
                                register,
                                reg_dict,
                                name='registration_register'),
                            url(r'^register/complete/$',
                                DirectTemplateView.as_view(
                                    template_name='registration/registration_complete.html',
                                    extra_context={ 'send_mail': settings.REGISTRATION_SEND_MAIL } ),
                                name='registration_complete'),
                            url(r'^register/closed/$',
                                TemplateView.as_view(template_name='registration/registration_closed.html'),
                                name='registration_disallowed'),
                            )
| 1.835938 | 2 |
2021/1/1b.py | kristianwiklund/AOC2019 | 3 | 12771162 | <reponame>kristianwiklund/AOC2019
#!/usr/bin/python3
import fileinput
import rolling

# AoC 2021 day 1 part 2: count how many consecutive 3-measurement window
# sums increase over their predecessor.
depths = [int(line) for line in fileinput.input()]
window_sums = list(rolling.Sum(depths, 3))
print(sum(1 for prev, cur in zip(window_sums, window_sums[1:]) if cur > prev))
| 3.234375 | 3 |
examples/flask/flask_sam.py | jpmn/sqlalchemy-media | 0 | 12771163 |
import json
import functools
from os import path, mkdir, getcwd
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import TypeDecorator, Unicode
from sqlalchemy_media import Image, ImageValidator, ImageProcessor, ImageAnalyzer, StoreManager, \
FileSystemStore
from sqlalchemy_media.constants import MB, KB
# Uploaded avatars live under ./static/avatars so Flask can serve them
# as static files.
WORKING_DIR = path.abspath(getcwd())
TEMP_PATH = path.join(WORKING_DIR, 'static', 'avatars')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///demo.db'
db = SQLAlchemy(app)
# Register a filesystem store named 'fs' as the default backend for
# sqlalchemy-media attachments; files are exposed at /static/avatars.
StoreManager.register(
    'fs',
    functools.partial(FileSystemStore, TEMP_PATH, 'http://localhost:5000/static/avatars'),
    default=True
)
class MasterPageView(object):
    """Minimal HTML page builder: a fixed header/footer around an appendable body."""

    # The %s placeholder in the header is filled with the page title on render.
    header = '<!DOCTYPE html><head><meta charset="utf-8"><title>%s</title></head><body>'
    footer = '</body>'

    def __init__(self, title='demo', body=''):
        self.title = title
        self.body = body

    def __str__(self):
        # Render the complete document in one pass.
        return ''.join([self.header % self.title, self.body, self.footer])

    def __iadd__(self, other):
        # Accept any object; non-strings are stringified before appending.
        if isinstance(other, str):
            self.body += other
        else:
            self.body += str(other)
        return self

    def __iter__(self):
        # Iterate over the rendered page line by line.
        return iter(str(self).splitlines())
class Json(TypeDecorator):
    """SQLAlchemy column type that stores Python objects as JSON text."""

    impl = Unicode

    def process_bind_param(self, value, engine):
        # Serialize outgoing Python values to a JSON string for storage.
        return json.dumps(value)

    def process_result_value(self, value, engine):
        # SQL NULL comes back as None; anything else is decoded from JSON.
        return None if value is None else json.loads(value)
class Avatar(Image):
    """Attachment type for profile pictures (sqlalchemy-media Image).

    Uploads are analyzed and validated, then normalized to a 128px-wide
    JPEG before storage.
    """
    # Coerce raw uploads (e.g. werkzeug FileStorage) into Avatar automatically.
    __auto_coercion__ = True
    __pre_processors__ = [
        ImageAnalyzer(),
        ImageValidator(
            minimum=(10, 10),
            maximum=(3840, 3840),
            content_types=('image/jpeg', 'image/png', 'image/gif'),
            # Aspect ratio pinned to exactly 1: only square images pass.
            min_aspect_ratio=1,
            max_aspect_ratio=1
        ),
        ImageProcessor(fmt='jpeg', width=128)
    ]
    # Accepted file-size range for the original upload.
    __max_length__ = 6*MB
    __min_length__ = 10*KB
class Person(db.Model):
    """Demo model: a person with a name and a JSON-backed Avatar column."""
    __tablename__ = 'person'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(Unicode)
    # Avatar metadata is persisted as JSON text; as_mutable lets SQLAlchemy
    # detect in-place changes to the attachment mapping.
    avatar = db.Column(Avatar.as_mutable(Json))
@app.errorhandler(500)
def internal_error(exception):
    """Log unhandled exceptions and return a real 500 response.

    Bug fix: the original returned the bare string "500", which Flask
    serves with a 200 status code; returning a (body, status) tuple sets
    the actual 500 status.
    """
    app.logger.error(exception)
    return "500", 500
@app.route("/", methods=['GET', 'POST'])
def index():
    """Render the demo page: an upload form plus the list of stored people.

    On POST, creates a Person from the submitted name and avatar file; the
    StoreManager context ties attachment storage to the session lifecycle.
    """
    page = MasterPageView('Index')
    page += '<form method="POST" action="/" enctype="multipart/form-data">'
    page += '<input type="text" name="name" value="Your Name here"/>'
    page += '<input type="file" name="avatar" />'
    page += '<input type="submit" />'
    page += '</form>'
    page += '<hr />'
    with StoreManager(db.session()):
        if request.method == 'POST':
            # Avatar.__auto_coercion__ converts the uploaded file object.
            new_person = Person(name=request.form['name'], avatar=request.files['avatar'])
            db.session.add(new_person)
            db.session.commit()
        # List every stored person with a link to their stored avatar image.
        page += '<ul>'
        for p in db.session.query(Person):
            page += '<li>'
            page += '<img src="%s" alt="%s">' % (p.avatar.locate(), p.name)
            page += '<h2>%s</h2>' % p.name
            page += '<h2>ID: %s</h2>' % p.id
            page += '</li>'
        page += '</ul>'
    return str(page)
if __name__ == "__main__":
    # Make sure the avatar store directory exists before handling uploads.
    if not path.exists(TEMP_PATH):
        mkdir(TEMP_PATH)
    db.create_all()
    app.run()
| 2.359375 | 2 |
portfolio/templatematching/tests/factories.py | Pompeiro/portfolio | 0 | 12771164 | <reponame>Pompeiro/portfolio
import factory
import factory.fuzzy
from django.core.files.base import ContentFile
from ..models import NEEDLE_CHOICES, UploadedImage
class UploadedImageFactory(factory.django.DjangoModelFactory):
    """Factory producing UploadedImage rows with a generated placeholder image."""
    # A fresh 1024x768 JPEG is generated for every instance.
    image = factory.LazyAttribute(
        lambda _: ContentFile(
            factory.django.ImageField()._make_data({"width": 1024, "height": 768}),
            "example.jpg",
        )
    )
    # Random needle drawn from the model's declared choices.
    needle = factory.fuzzy.FuzzyChoice([x[0] for x in NEEDLE_CHOICES])
    # Random matching threshold in [0.0, 1.0].
    threshold = factory.fuzzy.FuzzyFloat(0.0, 1.0)
    class Meta:
        model = UploadedImage
| 2.25 | 2 |
src/gripper.py | srkiyengar/Gripper | 0 | 12771165 | <reponame>srkiyengar/Gripper<gh_stars>0
__author__ = 'srkiyengar'
import pygame
import logging
import logging.handlers
from datetime import datetime
import reflex
import screen_print as sp
import joystick as js
import threading
import time
# Allowed deviation (in servo position units) when checking that a finger's
# initial position lies within its calibrated limits.
POS_ERROR = 20
# Rotating log file, named with the program start timestamp.
LOG_LEVEL = logging.DEBUG
LOG_FILENAME = 'Gripper' + datetime.now().strftime('%Y-%m-%d---%H:%M:%S')
# Define some colors (RGB) for the pygame status window.
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
SCAN_RATE = 20 #1(one) second divided by scan rate is the loop checking if the main program should die
# Shared state between the joystick thread and the reflex thread.
gp_servo=[0,0,0,0,0] # position 0 is not used 1 to 4 represent servos 1 to 4
previous_gp = [0,0,0,0,0] #holds the previously commanded goal positions
joy_loop_rate = 1000 # joystick sampling period, in microseconds
reflex_loop_rate = 16000 # reflex command period, in microseconds
my_lock = threading.Lock() # guards gp_servo and joy_moved across the two threads
last_time = datetime.now()
# Loop-control flags; cleared by stop_joy_loop()/stop_reflex_loop() so the
# worker threads exit.
reflex_command_loop = True
joy_measurement_loop = True
joy_moved = False # set when the joystick leaves rest; tells the reflex thread to act
def stop_joy_loop():
    """Signal the update_joy_displacement() thread to exit its loop."""
    global joy_measurement_loop
    joy_measurement_loop = False
def stop_reflex_loop():
    """Signal the move_reflex_to_goal_positions() thread to exit its loop."""
    global reflex_command_loop
    reflex_command_loop = False
def update_joy_displacement(my_joy, palm,e2):
    '''
    Joystick sampling thread: reads Logitech Extreme 3D axes 0 (pre-shape)
    and 1 (aperture) at the rate set by 'joy_loop_rate' and folds the
    displacements into the shared gp_servo goal-position list.
    :param my_joy: joystick wrapper providing get_axis_displacement_and_grip()
    :param palm: reflex_sf object used to clamp goals to each finger's limits
    :param e2: threading.Event; cleared by the main loop to pause this thread
    :return: None
    '''
    last_joy_time = last_time
    counter = 1
    global joy_moved
    while (joy_measurement_loop):
        e2.wait() # This is used to pause the thread in case we want to calibrate the gripper
        present_time = datetime.now()
        delta_t = present_time - last_joy_time
        delta_t = delta_t.seconds*1000000 + delta_t.microseconds
        # Sleep off the remainder of the sampling period, if any.
        if delta_t < joy_loop_rate:
            delay = joy_loop_rate - delta_t
            delay = delay/1000000.0
            time.sleep(delay)
        #d1 = [1,1,1]
        #d2 = [1,1,1]
        measurement_time = datetime.now()
        d1 = my_joy.get_axis_displacement_and_grip(1) #Axis 1 - Aperture
        d0 = my_joy.get_axis_displacement_and_grip(0) #Axis 0 - preshape
        # d0 and d1 are lists for axis 0 and axis 1 List Index 1 - moveby; Index 2 - direction
        aperture_change = d1[1]*d1[2]
        pre_shape = d0[1]*d0[2]
        if (aperture_change != 0 or pre_shape != 0):
            my_logger.info('d1[1] = {}, d1[2] = {}, d0[1] = {}, d0[2] = {}'.format(d1[1],d1[2], d0[1], d0[2]))
            my_logger.info('Joy Counter: {} - Time of Joy displacement: {} Move Fingers by {} Preshape by {}'.
                           format(counter, str(measurement_time)[17:],aperture_change, pre_shape))
            # We have to consider if the servo rotation is + 1 or -1 before we can add
            #my_logger.info('Update Servo 1 from {} to {} '.format(gp_servo[1], gp_servo[1]+aperture_change))
            #my_logger.info('Update Servo 2 from {} to {} '.format(gp_servo[2], gp_servo[2]-aperture_change))
            #my_logger.info('Update Servo 3 from {} to {} '.format(gp_servo[3], gp_servo[3]+aperture_change))
            #my_logger.info('Update Servo 4 from {} to {} '.format(gp_servo[4], gp_servo[4]-pre_shape))
            my_logger.info('Goal position before update {} '.format(gp_servo))
            # Servos 1 and 3 add the aperture change while servo 2 subtracts
            # it; servo 4 carries the pre-shape displacement.
            with my_lock:
                joy_moved = True #This is a flag to indicate Joystick is displaced from rest position.
                gp_servo[1] = gp_servo[1] + aperture_change
                gp_servo[2] = gp_servo[2] - aperture_change
                gp_servo[3] = gp_servo[3] + aperture_change
                gp_servo[4] = gp_servo[4] - pre_shape
                my_logger.info('Goal position after update {} '.format(gp_servo))
                my_logger.info('Joy displacement flag is {} '.format(joy_moved))
            counter += 1
        # We have to check if the computed gp is within limits
        # NOTE(review): this clamp reads and writes gp_servo outside my_lock
        # while the reflex thread may be copying it -- confirm intended.
        for i in range(1,5,1):
            np = palm.is_finger_within_limit(i, gp_servo[i])
            if np > 0: # np > 1 is valid when outside limit, np will be the limit value of the servo position
                gp_servo[i] = np
            else:
                raise RuntimeError('servo finger joint rotation error\n')
        last_joy_time = present_time
def move_reflex_to_goal_positions(palm,e2):
    '''
    Reflex command thread: moves the hand toward the shared gp_servo goal
    positions at the rate set by 'reflex_loop_rate', but only when the
    joystick thread has flagged a displacement via joy_moved.
    :param palm: the reflex_sf object whose servos are commanded
    :param e2: threading.Event; cleared by the main loop to pause this thread
    :return: None
    '''
    counter = 1
    # Bug fix: the original declared 'continue_reflex_loop', a name that does
    # not exist anywhere (the loop flag is 'reflex_command_loop', which is
    # only read here and therefore needs no global declaration). Only the
    # names this function actually rebinds (plus last_time, read below) are
    # declared.
    global last_time, previous_gp, joy_moved
    last_reflex_time = last_time
    while (reflex_command_loop):
        e2.wait() # Pause point used while the gripper is being calibrated.
        present_time = datetime.now()
        delta_t = present_time - last_reflex_time
        delta_t = delta_t.seconds*1000000 + delta_t.microseconds
        # Sleep off the remainder of the command period, if any.
        if (delta_t < reflex_loop_rate):
            delay = reflex_loop_rate - delta_t
            delay = delay/1000000.0
            time.sleep(delay)
        # Snapshot the shared goal positions and displacement flag atomically.
        with my_lock:
            gp = list(gp_servo)
            move_servo = joy_moved
        command_time = datetime.now()
        if move_servo:
            my_logger.info('Reflex Counter: Iam in the If condition')
            my_logger.info('Reflex Counter: {} - Time of Command: {}'.format(counter, command_time))
            my_logger.info('-->Reflex - GP = {} Previous GP = {}'.format(gp, previous_gp))
            palm.move_to_goal_position(gp)
            my_logger.info('-->Reflex - Moved to GP = {}'.format(gp))
            previous_gp = gp
            # Clear the flag so we do not re-send the same goal positions.
            with my_lock:
                joy_moved = False
                my_logger.info('Reflex - Resetting joy_moved to {}'.format(joy_moved))
        counter += 1
        last_reflex_time = present_time
    my_logger.info('Exit Reflex thread')
if __name__ == '__main__':
    # Set up a logger with output level set to debug; Add the handler to the logger
    my_logger = logging.getLogger("My_Logger")
    my_logger.setLevel(LOG_LEVEL)
    handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=6000000, backupCount=5)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    my_logger.addHandler(handler)
    # end of logfile preparation Log levels are debug, info, warn, error, critical
    #Create Palm object
    palm = reflex.reflex_sf() # Reflex object ready
    my_logger.info('Reflex_SF object created')
    calibrate = False
    # Seed the shared goal positions from each finger's saved initial
    # position and sanity-check them against the calibrated limits.
    for i in range(1,5,1):
        lowest_position = palm.finger[i]["lower_limit"]
        highest_position = palm.finger[i]["upper_limit"]
        init_position = palm.finger[i]["initial_position"]
        gp_servo[i] = init_position
        previous_gp[i] = init_position
        my_logger.info('--- Finger {}:'.format(i))
        my_logger.info('      Lower Limit Position --- {}'.format(lowest_position))
        my_logger.info('      Upper Limit Position --- {}'.format(highest_position))
        my_logger.info('      Initial Position {}'.format(init_position))
        if (i == 1 or i == 3):
            a = lowest_position - POS_ERROR
            b= highest_position + POS_ERROR
            if a >= init_position or init_position >= b:
                my_logger.info('Servo {} Initial Position {} not between Lower Limit {} and Upper Limit {}'\
                    .format(i,init_position,lowest_position,highest_position))
                print('Servo {} Initial Position {} not between Lower Limit {} and Upper Limit {}'.format(\
                    i,init_position,lowest_position,highest_position))
        # NOTE(review): compared to the i==1/3 branch this condition looks
        # inverted (it fires when the position IS inside the narrowed band)
        # and servo 4 is never checked at all -- verify the intended logic.
        elif (i == 2):
            a = lowest_position + POS_ERROR
            b = highest_position - POS_ERROR
            if a <= init_position or init_position <= b:
                my_logger.info('Servo {} Initial Position {} not between Lower Limit {} and Upper Limit {}'\
                    .format(i,init_position,lowest_position,highest_position))
                print('Servo {} Initial Position {} not between Lower Limit {} and Upper Limit {}'.format(\
                    i,init_position,lowest_position,highest_position))
    # calibration is a must after every start.
    pygame.init()
    # Set the width and height of the screen [width,height]
    size = [500, 700]
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("Reflex_SF JoyStick Movements")
    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    # for print in Pygame screen object
    textPrint = sp.TextPrint()
    # Joystick Values
    my_joy = js.ExtremeProJoystick()
    my_controller = reflex.joy_reflex_controller(my_joy,palm)
    # Event to pause the thread
    e2 = threading.Event()
    e2.set() #Flag is set to allow the thread move_reflex_to_goal_positions to run
    # Block here until the user calibrates (button 1); the worker threads are
    # only started once calibration has produced valid goal positions.
    while (calibrate == False):
        for event in pygame.event.get():
            if event.type == pygame.JOYBUTTONDOWN:
                button = my_joy.get_button_pressed(event)
                my_logger.info("Button {} pressed".format(button))
                if button in (2,3,6,7,8,9,10,11):
                    my_controller.set_button_press(button)
                elif button == 1: #silver button on the right facing the buttons
                    gp_servo = my_controller.update_calibrate()
                    previous_gp = gp_servo
                    calibrate = True
                else:
                    my_logger.info("Button {} press ignored before calibration".format(button))
            elif event.type == pygame.JOYBUTTONUP:
                button = my_joy.get_button_released(event)
                my_logger.info("Button {} Released".format(button))
                if button in (2,3,6,7,8,9,10,11):
                    my_controller.set_button_release(button)
                else:
                    my_logger.info("Button {} press ignored before calibration".format(button))
            else:
                pass # ignoring other non-logitech joystick event types
    # preparing the two threads that will run
    get_goal_position_thread = threading.Thread(target = update_joy_displacement,args=(my_joy,palm,e2))
    set_goal_position_thread = threading.Thread(target = move_reflex_to_goal_positions, args=(palm,e2))
    get_goal_position_thread.start()
    set_goal_position_thread.start()
    # The main loop that examines for other UI actions including Joy button/HatLoop until the user clicks the close button.
    done = False
    while (done == False):
        screen.fill(WHITE)
        textPrint.reset()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
                stop_joy_loop()
                stop_reflex_loop()
            elif event.type == pygame.JOYBUTTONDOWN:
                button = my_joy.get_button_pressed(event)
                my_logger.info("Button {} pressed".format(button))
                if (button == 1):
                    # Button 1: pause the worker threads, move the hand to its
                    # rest position and resync goal positions before resuming.
                    e2.clear()
                    time.sleep(1)
                    palm.move_to_rest_position()
                    gp_servo = palm.read_palm_servo_positions()
                    my_logger.info("Finger Rest Positions {}".format(gp_servo))
                    time.sleep(1)
                    my_logger.info("Setting Event Flag")
                    e2.set()
                else:
                    my_controller.set_button_press(button)
            elif event.type == pygame.JOYBUTTONUP:
                button = my_joy.get_button_released(event)
                my_logger.info("Button {} Released".format(button))
                my_controller.set_button_release(button)
            elif event.type == pygame.JOYHATMOTION:
                my_logger.info("Hat movement - {}".format(my_joy.get_hat_movement(event)))
                pass
            elif event.type == pygame.JOYAXISMOTION:
                pass
            else:
                pass # ignoring other non-logitech joystick event types
        # The code below is to test the measurement of Axes displacement in the Joystick and should be removed
        '''
        Num_Axes = my_joy.axes
        for k in range(0,Num_Axes,1):
            d = my_joy.get_axis_displacement_and_grip(k)
            #my_logger.info("Axis No.: {} Move: {} Displacement: {} Grip: {}".format(k,d[0],d[1],d[2]))
            if d[0] == 1:
                palm.grip_fingers(d[1],d[2])
            elif d[0] == 2:
                palm.space_finger1_and_finger2(d[1],d[2])
        '''
        # end of test code for the measurement of Axes displacement in the Joystick
        textPrint.Screenprint(screen, "When ready to Quit, close the screen")
        textPrint.Yspace()
        # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
        # Go ahead and update the screen with what we've drawn.
        pygame.display.flip()
        # Limit to 20 frames per second OR 50 ms scan rate - 1000/20 = 50 ms Both display and checking of Joystick;
        clock.tick(SCAN_RATE)
| 2.765625 | 3 |
models/cifar_mobilenetv1.py | xhchrn/open_lth | 9 | 12771166 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from foundations import hparams
from lottery.desc import LotteryDesc
from models import base
from pruning import sparse_global
class Model(base.Model):
    """A MobileNet-V1 as originally designed for CIFAR-10."""

    class Block(nn.Module):
        """A MobileNet-V1 block: 3x3 depthwise conv then 1x1 pointwise conv,
        each followed by batch norm and ReLU."""

        # def __init__(self, f_in: int, f_out: int, downsample=False):
        def __init__(self, f_in: int, f_out: int, stride=1):
            super(Model.Block, self).__init__()
            # groups=f_in makes conv1 a depthwise convolution.
            self.conv1 = nn.Conv2d(f_in, f_in, kernel_size=3, stride=stride, padding=1, groups=f_in, bias=False)
            self.bn1 = nn.BatchNorm2d(f_in)
            self.conv2 = nn.Conv2d(f_in, f_out, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn2 = nn.BatchNorm2d(f_out)

        def forward(self, x):
            out = F.relu(self.bn1(self.conv1(x)))
            out = F.relu(self.bn2(self.conv2(out)))
            # NOTE(review): this applies relu to an already-relu'd tensor;
            # idempotent, so harmless, but presumably one call is redundant.
            return F.relu(out)

    def __init__(self, initializer, num_512_blocks=5, outputs=None):
        """Build the network.

        :param initializer: weight-init function applied to every submodule
        :param num_512_blocks: number of stride-1 blocks with 512 channels
        :param outputs: number of classes (defaults to 10 for CIFAR-10)
        """
        super(Model, self).__init__()
        outputs = outputs or 10
        # (128,2) means conv planes=128, conv stride=2, by default conv stride=1
        # cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
        cfg_part1 = [64, (128,2), 128, (256,2), 256, (512,2)]
        cfg_part2 = [512] * num_512_blocks
        cfg_part3 = [(1024,2), 1024]
        # Initial convolution.
        self.conv = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(32)
        # The subsequent layers of MobileNet-V1.
        self.layers_part1 = self._make_layers(in_planes=32 , config=cfg_part1)
        self.layers_part2 = self._make_layers(in_planes=512, config=cfg_part2)
        self.layers_part3 = self._make_layers(in_planes=512, config=cfg_part3)
        # Final fc layer. Size = number of filters in last segment.
        self.fc = nn.Linear(1024, outputs)
        self.criterion = nn.CrossEntropyLoss()
        # Initialize.
        self.apply(initializer)

    def _make_layers(self, in_planes, config):
        """Chain Blocks per *config*: int entries mean stride 1, tuple
        entries are (out_planes, stride)."""
        layers = []
        for x in config:
            out_planes = x if isinstance(x, int) else x[0]
            stride = 1 if isinstance(x, int) else x[1]
            layers.append(Model.Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn(self.conv(x)))
        out = self.layers_part1(out)
        out = self.layers_part2(out)
        out = self.layers_part3(out)
        # Global average pool over the full spatial extent, then flatten.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

    @property
    def output_layer_names(self):
        """Parameter names of the classification head."""
        return ['fc.weight', 'fc.bias']

    @staticmethod
    def is_valid_model_name(model_name):
        """True for names of the form 'cifar_mobilenetv1' or 'cifar_mobilenetv1_N' (N >= 1)."""
        return (model_name.startswith('cifar_mobilenetv1') and
                3 >= len(model_name.split('_')) >= 2 and
                all([x.isdigit() and int(x) > 0 for x in model_name.split('_')[2:]]) and
                (len(model_name.split('_')) == 2 or int(model_name.split('_')[2]) >= 1))

    @staticmethod
    def get_model_from_name(model_name, initializer, outputs=10):
        """Construct a model from a name of the form 'cifar_mobilenetv1[_N]'.

        N is the total number of blocks with 512 input and output channels
        and stride 1. The default value of N is 5 if it isn't provided.
        """
        if not Model.is_valid_model_name(model_name):
            raise ValueError('Invalid model name: {}'.format(model_name))

        name = model_name.split('_')
        N = 5 if len(name) == 2 else int(name[2])
        return Model(initializer, N, outputs)

    @property
    def loss_criterion(self):
        return self.criterion

    @staticmethod
    def default_hparams():
        """Default model/dataset/training/pruning hyperparameters for lottery-ticket runs."""
        model_hparams = hparams.ModelHparams(
            model_name='cifar_mobilenetv1',
            model_init='kaiming_normal',
            batchnorm_init='uniform',
        )

        dataset_hparams = hparams.DatasetHparams(
            dataset_name='cifar10',
            batch_size=128,
        )

        training_hparams = hparams.TrainingHparams(
            optimizer_name='sgd',
            momentum=0.9,
            milestone_steps='80ep,120ep',
            lr=0.1,
            gamma=0.1,
            weight_decay=1e-4,
            training_steps='160ep',
        )

        pruning_hparams = sparse_global.PruningHparams(
            pruning_strategy='sparse_global',
            pruning_fraction=0.2
        )

        return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams)
| 2.28125 | 2 |
Hello World Programs/Hello World.py | chloee59/Contribute-to-HacktoberFest2020 | 0 | 12771167 | <filename>Hello World Programs/Hello World.py
print("Hello world")
print("Nice to meet you!")
name = input("what is your name")
# Bug fix: the original 'print ("..." name "!")' was a syntax error --
# adjacent string literal and variable need explicit concatenation.
print("I hope to code with you more " + name + "!")
| 3.25 | 3 |
kslurm/shell.py | pvandyken/kslurm | 1 | 12771168 | <reponame>pvandyken/kslurm
from __future__ import absolute_import, annotations

import importlib.resources as impr
import os
import shutil
import subprocess as sp
from pathlib import Path

from shellingham import ShellDetectionFailure, detect_shell
class Shell:
    """
    Represents the current shell.

    Detection is cached on the class: the first successful ``get()`` stores
    the result in ``_shell`` and every later call returns it.
    """

    _shell = None  # cached singleton populated by get()

    def __init__(self, name: str, path: str) -> None:
        self._name = name
        self._path = path

    @property
    def name(self) -> str:
        """Short shell name, e.g. ``bash``."""
        return self._name

    @property
    def path(self) -> str:
        """Path of the shell executable."""
        return self._path

    @classmethod
    def get(cls) -> Shell:
        """
        Retrieve the current shell.

        Falls back to $SHELL (POSIX) or %COMSPEC% (Windows) when process
        inspection fails, and to the first ``bash`` on $PATH when the
        detected shell is not bash (the only supported shell).

        Raises:
            RuntimeError: if no shell can be detected, or no bash
                executable exists to fall back on.
        """
        if cls._shell is not None:
            return cls._shell

        try:
            name, path = detect_shell(os.getpid())
        except (RuntimeError, ShellDetectionFailure):
            shell = None
            if os.name == "posix":
                shell = os.environ.get("SHELL")
            elif os.name == "nt":
                shell = os.environ.get("COMSPEC")
            if not shell:
                raise RuntimeError("Unable to detect the current shell.")
            name, path = Path(shell).stem, shell

        if name != "bash":
            print("At this time, only bash shell is supported.")
            # Bug fix: the original ran sp.run(["command", "-v", "bash"]),
            # but ``command`` is a shell builtin, not an executable, so the
            # call raised FileNotFoundError -- and the fallback path kept a
            # trailing newline from stdout. shutil.which performs the same
            # PATH lookup portably and returns a clean path.
            bash_path = shutil.which("bash")
            if bash_path is None:
                # Bug fix: the original printed "Aborting" but then carried
                # on with an empty path; actually abort.
                print("No bash executable found on $PATH. Aborting")
                raise RuntimeError("No bash executable found on $PATH.")
            name = "bash"
            path = bash_path

        cls._shell = cls(name, path)
        return cls._shell

    def activate(self, env: Path):
        """Run an interactive bash subshell with the virtualenv *env* active."""
        activate_script = self._get_activate_script()
        activate_path = env / "bin" / activate_script
        if self._name == "bash":
            with impr.path("kslurm.bin", "kpy-init.sh") as path:
                # The init script reads these variables to activate the env.
                os.environ["activate_path"] = str(activate_path)
                os.environ["kpy_set_subshell"] = "1"
                sp.run([self._path, "--init-file", path.resolve()])
            del os.environ["activate_path"]
            # Bug fix: kpy_set_subshell was never cleaned up, leaking into
            # this process's environment after the subshell exits.
            os.environ.pop("kpy_set_subshell", None)

    def source(self, env: Path):
        """Return a shell snippet that sources *env*'s activate script.

        Also exports activate_path for the init script; returns "" for
        unsupported shells.
        """
        activate_script = self._get_activate_script()
        activate_path = env / "bin" / activate_script
        if self._name == "bash":
            with impr.path("kslurm.bin", "kpy-init.sh") as path:
                os.environ["activate_path"] = str(activate_path)
                return f"activate_path={activate_path}; . {path.resolve()}"
        return ""

    def _get_activate_script(self) -> str:
        """Activate-script filename suffix appropriate for this shell."""
        if self._name == "fish":
            suffix = ".fish"
        elif self._name in ("csh", "tcsh"):
            suffix = ".csh"
        elif self._name in ("powershell", "pwsh"):
            suffix = ".ps1"
        elif self._name == "cmd":
            suffix = ".bat"
        else:
            suffix = ""
        return "activate" + suffix

    def _get_source_command(self) -> str:
        """The builtin this shell uses to source a script."""
        if self._name in ("fish", "csh", "tcsh"):
            return "source"
        return "."

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}("{self._name}", "{self._path}")'
| 1.90625 | 2 |
python/utils.py | SamoFMF/Slovenia---Settlements-Post-Offices | 0 | 12771169 | # Utilities
import pickle
from math import pi, cos, sin, asin, sqrt
def saveFile(filename, data):
    """Pickle *data* into the file at *filename* (binary mode)."""
    with open(filename, "wb") as handle:
        pickle.dump(data, handle)
def loadFile(filename):
    """Unpickle and return the object stored in *filename*."""
    with open(filename, "rb") as handle:
        return pickle.load(handle)
def coordToDeg(coord):
    """Convert a (degrees, minutes, seconds) triple to decimal degrees."""
    degrees, minutes, seconds = coord
    return degrees + minutes / 60 + seconds / 3600
def deg2rad(deg):
    """Convert an angle from degrees to radians."""
    return deg * pi / 180
def rad2deg(rad):
    """Convert an angle from radians to degrees."""
    return 180 * rad / pi
def getDistance(P, Q):
    """Great-circle distance between two points on a sphere (approximating
    Earth), computed with the Haversine formula.

    Input:
        - point P given as (longitude, latitude), where each coordinate is a
          (degrees, minutes, seconds) triple
        - point Q in the same format
    Output:
        - distance in kilometers
    """
    lon1 = deg2rad(coordToDeg(P[0]))
    lat1 = deg2rad(coordToDeg(P[1]))
    lon2 = deg2rad(coordToDeg(Q[0]))
    lat2 = deg2rad(coordToDeg(Q[1]))
    # Haversine term: sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
    h = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    # Average of Earth's polar and equatorial radii, in kilometers.
    R = (6356.752 + 6378.137) / 2
    return 2 * R * asin(sqrt(h))
navicatGA/test/test_real_application.py | lcmd-epfl/NaviCatGA | 1 | 12771170 | #!/usr/bin/env python3
from navicatGA.selfies_solver import SelfiesGenAlgSolver
from navicatGA.score_modifiers import score_modifier
from navicatGA.wrappers_selfies import (
sc2smiles,
sc2mol_structure,
mol_structure2depictions,
)
from navicatGA.quantum_wrappers_selfies import sc2gap
from navicatGA.wrappers_selfies import sc2logp, sc2mw
# In this test, we dont use a chimera scalarizer and we simply define a combined fitness function
def fitness_function_wrapper(target_1, target_2, target_3):
    """Build a combined fitness function (no chimera scalarizer is used).

    The returned callable averages weighted, modified scores of the
    HOMO-LUMO gap, logP and molecular weight against the three targets.
    """

    def fitness(chromosome):
        gap_term = 0.4 * score_modifier(sc2gap(chromosome, lot=0), target_1, 3)
        logp_term = 0.4 * score_modifier(sc2logp(chromosome), target_2, 1)
        mw_term = 0.2 * score_modifier(sc2mw(chromosome), target_3, 3)
        return (gap_term + logp_term + mw_term) / 3

    return fitness
def test_real_application_16():
    """Run a short GA from one seed SELFIES, optimizing gap/logP/MW, then
    report and depict the best individual."""
    starting_selfies = ["[C][O][=C][C][=N][Ring_1]"]
    solver = SelfiesGenAlgSolver(
        n_genes=15,
        pop_size=10,
        max_gen=10,
        fitness_function=fitness_function_wrapper(
            target_1=0.05, target_2=0.1, target_3=65
        ), # homo-lumo gap, logp, mw
        starting_selfies=starting_selfies,
        starting_stoned=True,
        prune_duplicates=True,
        mutation_rate=0.05,
        selection_rate=0.4,
        random_state=666,
        n_crossover_points=1,
        verbose=False,
        progress_bars=True,
        to_file=True,
        selection_strategy="boltzmann",
        to_stdout=False,
        logger_level="INFO",
        logger_file="real_application.log",
        show_stats=True,
    )
    solver.solve()
    print(
        "After optimization, the corresponding SMILES is : {0}".format(
            sc2smiles(solver.best_individual_)
        )
    )
    print(
        "It has properties: \n HOMO-LUMO gap : {0} \n LogP : {1} \n Molecular weight : {2}".format(
            sc2gap(solver.best_individual_),
            sc2logp(solver.best_individual_),
            sc2mw(solver.best_individual_),
        )
    )
    # Write 2D depictions of the best molecule alongside the run log.
    mol = sc2mol_structure(solver.best_individual_)
    mol_structure2depictions(mol, "real_application")
    solver.close_solver_logger()
if __name__ == "__main__":
    # Allow running this test directly as a script.
    test_real_application_16()
| 2.28125 | 2 |
Q02__/80_Wiggle_Sort/test.py | hsclinical/leetcode | 0 | 12771171 | <filename>Q02__/80_Wiggle_Sort/test.py<gh_stars>0
#!/usr/bin/python
from Solution import Solution

# Small driver: run wiggleSort on a sample input and show the result.
solver = Solution()
# Alternative sample: [3, 5, 2, 1, 6, 4]
values = [3, 5, 4, 2, 1, 6, 4]
print(solver.wiggleSort(values))
| 3.109375 | 3 |
setup.py | dilwong/algorithms | 0 | 12771172 | <reponame>dilwong/algorithms
import setuptools

if __name__ == "__main__":
    # Non-empty, stripped lines of requirements.txt become install_requires.
    with open('requirements.txt', 'r') as req_file:
        requirements = [line.strip() for line in req_file if line.strip()]
    setuptools.setup(
        name='algorithms',
        version='1.0.2',
        author='<NAME>',
        author_email='',
        description='Basic algorithms and data structures',
        url='https://github.com/dilwong/algorithms',
        install_requires=requirements,
        packages=['algorithms'],
        package_dir={'algorithms': 'python'},
    )
atlas/workflow/scripts/convert_jgi2vamb_coverage.py | alienzj/atlas | 204 | 12771173 | <reponame>alienzj/atlas<filename>atlas/workflow/scripts/convert_jgi2vamb_coverage.py
#!/usr/bin/env python
import os
import sys
import re
def main(jgi_file):
    """Convert jgi_summarize_bam_contig_depths output to the VAMB format.

    Keeps the contigName/contigLen/totalAvgDepth columns plus every
    per-sample column whose header ends in ".bam", and writes the reduced
    table to stdout.
    """
    column_index = {}
    wanted = ["contigName", "contigLen", "totalAvgDepth"]
    with open(jgi_file) as handle:
        for line_no, raw in enumerate(handle):
            fields = raw.rstrip().split("\t")
            if line_no == 0:
                # Header row: record column positions and emit the new header.
                column_index = {name: pos for pos, name in enumerate(fields)}
                wanted += [name for name in fields if name.endswith(".bam")]
                print("\t".join(wanted))
                continue
            if fields[0] == "":
                # Skip blank rows.
                continue
            print("\t".join(fields[column_index[name]] for name in wanted))
if __name__ == "__main__":
    # When driven by Snakemake, redirect stderr to the rule's log file and
    # stdout to the rule's output file.
    if "snakemake" in globals():
        with open(snakemake.log[0], "w") as log:
            sys.stderr = log

            with open(snakemake.output[0], "w") as outf:
                sys.stdout = outf

                main(snakemake.input[0])

    else:
        # Standalone CLI mode.
        import argparse
        import logging

        logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.DEBUG)

        class CustomFormatter(
            argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
        ):
            pass

        desc = (
            "Converting jgi_summarize_bam_contig_depths output to format used by VAMB"
        )
        epi = """DESCRIPTION:
    Output format: contigName<tab>contigLen<tab>totalAvgDepth<tab>SAMPLE1.sort.bam<tab>Sample2.sort.bam<tab>...
    Output written to STDOUT
    """
        parser = argparse.ArgumentParser(
            description=desc, epilog=epi, formatter_class=CustomFormatter
        )
        # NOTE(review): the bare expression below is a no-op -- presumably a
        # leftover; verify it can be removed.
        argparse.ArgumentDefaultsHelpFormatter
        parser.add_argument(
            "jgi_file",
            metavar="jgi_file",
            type=str,
            help="jgi_summarize_bam_contig_depths output table",
        )
        parser.add_argument("--version", action="version", version="0.0.1")
        args = parser.parse_args()

        main(args.jgi_file)
| 2.65625 | 3 |
main.py | Adrian-at-CrimsonAuzre/madlib_generator | 0 | 12771174 | import madlib
# Print 100 independently generated madlibs.
for _ in range(100):
    print(madlib.get_madlib())
| 2.203125 | 2 |
geostream/vector/serialize.py | karimbahgat/GeoStream | 0 | 12771175 | <reponame>karimbahgat/GeoStream
import sqlite3
from sqlite3 import Binary
from shapely.wkb import loads as wkb_loads
from shapely.geometry import shape
from shapely.geometry import Point, MultiPoint, LineString, MultiLineString, Polygon, MultiPolygon
def shapely_to_wkb(shp):
    """sqlite3 adapter: shapely geometry -> WKB wrapped in a Binary buffer."""
    return Binary(shp.wkb)
def geoj_to_wkb(geoj):
    """sqlite3 adapter: GeoJSON-like mapping -> WKB wrapped in a Binary buffer."""
    return Binary(shape(geoj).wkb)
def from_wkb(wkb_buf):
    """sqlite3 converter: stored WKB buffer -> shapely geometry."""
    return wkb_loads(bytes(wkb_buf))
# Wire the adapters/converters into sqlite3: every shapely geometry type
# (and plain dicts, treated as GeoJSON) is stored as WKB, and columns
# declared as 'geom' are decoded back into shapely geometries.
for geotype in [Point, MultiPoint, LineString, MultiLineString, Polygon, MultiPolygon]:
    sqlite3.register_adapter(geotype, shapely_to_wkb)
sqlite3.register_adapter(dict, geoj_to_wkb)
sqlite3.register_converter('geom', from_wkb)
| 2.5 | 2 |
karabo/simulation/station.py | i4Ds/Karabo-Pipeline | 0 | 12771176 | from karabo.simulation.coordinate_helper import east_north_to_long_lat
from karabo.simulation.east_north_coordinate import EastNorthCoordinate
class Station:
    """One station of a telescope: its position relative to the array centre
    plus the antennas mounted on it."""

    def __init__(self, position: EastNorthCoordinate,
                 parent_longitude: float = 0,
                 parent_latitude: float = 0,
                 parent_altitude: float = 0):
        """
        :param position: Position of station in relation to the telescope centre
        :param parent_longitude: Longitude (degrees) of the telescope centre
        :param parent_latitude: Latitude (degrees) of the telescope centre
        :param parent_altitude: Altitude of the telescope centre.
            NOTE(review): currently unused -- the station altitude is taken
            from position.z; verify whether it should be added in.
        """
        self.position: EastNorthCoordinate = position
        # Antenna positions are east/north offsets relative to this station.
        self.antennas: "list[EastNorthCoordinate]" = []
        # Convert the station's east/north offset into absolute long/lat.
        long, lat = east_north_to_long_lat(position.x, position.y, parent_longitude, parent_latitude)
        self.longitude: float = long
        self.latitude: float = lat
        self.altitude: float = position.z

    def add_station_antenna(self, antenna: EastNorthCoordinate):
        """Append one antenna position (east/north offset) to this station."""
        self.antennas.append(antenna)
| 2.984375 | 3 |
src/insert.py | riggedCoinflip/danbooru_big_data | 0 | 12771177 | <reponame>riggedCoinflip/danbooru_big_data<filename>src/insert.py
import psycopg2
import time
from connect import connect
from load_dataset import yield_dataset
def main():
    """Stream the dataset into the database, committing every 100 rows.

    The whole run happens inside one connection context; psycopg2 commits
    the final partial batch when the ``with`` block exits cleanly.
    """
    with connect() as conn:
        datasets = yield_dataset()
        for i, dataset in enumerate(datasets):
            # Resume point: skip rows already imported by a previous run.
            if i < 25000:
                continue
            print(f'{i=}, {dataset=}')
            insert(dataset, conn)
            # up
            if i % 100 == 0:
                conn.commit()

            #if i >= 10:
                #return #testing
            #time.sleep(1) # testing
def insert(ds:dict, conn):
    """Insert one dataset row: its image record, user rows, tags and tag links."""
    record = {
        'image_id': int(ds['id']),
        'uploader_id': int(ds['uploader_id']),
        'approver_id': int(ds['approver_id']),
        'created_at': ds['created_at'],
        'updated_at': ds['updated_at'],
        'last_commented_at': ds['last_commented_at'],
        'score': int(ds['score']),
        'up_score': int(ds['up_score']),
        'down_score': int(ds['down_score']),
        'source': ds['source'],
        'rating': ds['rating'],
        'image_width': int(ds['image_width']),
        'image_height': int(ds['image_height']),
        'file_size': int(ds['file_size']),
        'has_children': ds['has_children'],
        'is_note_locked': ds['is_note_locked'],
        'is_status_locked': ds['is_status_locked'],
        'is_pending': ds['is_pending'],
        'is_flagged': ds['is_flagged'],
        'is_deleted': ds['is_deleted'],
        'is_banned': ds['is_banned'],
    }
    # A parent_id of 0 means "no parent"; only store real parents.
    parent = int(ds['parent_id'])
    if parent != 0:
        record['parent_id'] = parent

    # Make sure referenced users exist before the image row references them.
    for user_key in ('uploader_id', 'approver_id'):
        if user_key in record:
            insert_user(record[user_key], conn)

    insert_image(record, conn)

    for tag in ds['tags']:
        insert_tag(tag, conn)
        insert_tags_image(int(tag['id']), record['image_id'], conn)
def insert_user(user_id:int, conn):
    """Insert a user row unless one with this id already exists."""
    row = {'user_id': user_id}
    if in_db('users', f'user_id = {row["user_id"]}', conn):
        return
    query_insert_one('users', row, conn)
def insert_image(d:dict ,conn):
    """Insert the image row *d* unless its image_id is already present."""
    if in_db('image', f'image_id = {d["image_id"]}', conn):
        return
    query_insert_one('image', d, conn)
def insert_tag(tag:dict, conn):
    """Insert a tag row (id, name, category) unless it already exists."""
    row = {
        'tag_id': int(tag['id']),
        'name': tag['name'],
        'tag_category_id': int(tag['category']),
    }
    if in_db('tags', f'tag_id = {row["tag_id"]}', conn):
        return
    query_insert_one('tags', row, conn)
def insert_tags_image(tag_id:int, image_id:int, conn):
    """Link `tag_id` to `image_id` in the tags_image join table (idempotent)."""
    link = {'tag_id': tag_id, 'image_id': image_id}
    if in_db('tags_image', f'tag_id = {link["tag_id"]} AND image_id = {link["image_id"]}', conn):
        return
    query_insert_one('tags_image', link, conn)
def in_db(table:str, filter:str, conn):
    """Return True if any row of `table` matches the SQL `filter` clause.

    NOTE(review): `table` and `filter` are interpolated directly into the
    SQL text; callers must only pass trusted, integer-derived filters.
    """
    with conn.cursor() as cursor:
        cursor.execute(f'SELECT EXISTS(SELECT 1 FROM {table} WHERE {filter})')
        return cursor.fetchone()[0]
def query_insert_one(table:str, ds:dict, conn):
    """INSERT one row built from `ds` into `table`; values are parameterized."""
    # TODO use cursor.executemany as query_insert_many
    columns = list(ds)
    placeholders = ', '.join(['%s'] * len(columns))
    with conn.cursor() as cursor:
        cursor.execute(
            f'INSERT INTO {table} ({", ".join(columns)}) VALUES ({placeholders})',
            [ds[col] for col in columns])
def test():
    """Smoke test: verify the DB connection by printing tag category names."""
    with connect() as conn:
        cur = conn.cursor()
        cur.execute("select * from tag_category")
        for record in cur:
            print(record[1])
# Entry point: stream the dump into the database.
if __name__ == '__main__':
    main()
| 2.421875 | 2 |
sorting_algorithms.py | edzelle/Projects | 0 | 12771178 | import numpy as np
import matplotlib.pyplot as plt
import timeit
import random
import math
def insertionSort(a):
    """Sort `a` in place with insertion sort (stable, O(n^2))."""
    for idx in range(1, len(a)):
        key = a[idx]
        j = idx
        # Shift larger elements one slot right until key's position is found.
        while j > 0 and key < a[j - 1]:
            a[j] = a[j - 1]
            j -= 1
        a[j] = key
def countingSort(a, max):
    """Sort non-negative ints in [0, max] in place via counting sort (O(n + max)).

    Bug fix: the original wrote ``a[x] = a`` -- assigning the list object
    itself into each slot -- instead of the sorted value.
    """
    m = max + 1
    count = [0] * m
    for value in a:
        count[value] += 1
    x = 0
    for value in range(m):
        for _ in range(count[value]):
            a[x] = value
            x += 1
def quickSort(a, low, high):
    """Sort a[low..high] (inclusive) in place with randomized quicksort."""
    if low >= high:
        return
    pivot = a[random.randint(low, high)]
    left, right = low, high
    while left <= right:
        while a[left] < pivot:
            left += 1
        while a[right] > pivot:
            right -= 1
        if left <= right:
            a[left], a[right] = a[right], a[left]
            left += 1
            right -= 1
    quickSort(a, low, right)
    quickSort(a, left, high)
def mergeSort(a):
    """Return a sorted copy of `a` via top-down merge sort."""
    if len(a) < 2:
        return a
    mid = len(a) // 2
    return merge(mergeSort(a[:mid]), mergeSort(a[mid:]))
def merge(left, right):
    """Merge two sorted lists into one sorted list (stable)."""
    if not left or not right:
        return left or right
    out = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            out.append(left[i])
            i += 1
        else:
            out.append(right[j])
            j += 1
    # One side is exhausted; the other's remainder is already sorted.
    out.extend(left[i:] or right[j:])
    return out
def radixSort(a, base=10, max_val=None):
    """Sort a list of non-negative ints in place with LSD radix sort.

    Bug fixes vs. the original: the output loop ran ``while (1>=0)``
    (always true, so `i` walked past the start of the array), and the digit
    histogram was indexed with the array *values* (``count[(a[i]//10)%10]``
    built from ``for i in a``), which crashes or miscounts.

    `base` and `max_val` generalize the old hard-coded single base-10 pass
    while keeping the 3-argument call sites in this file working; both are
    optional, so ``radixSort(a)`` behaves as a full sort.
    """
    if not a:
        return
    if max_val is None:
        max_val = max(a)
    exp = 1
    while max_val // exp > 0:
        out = [0] * len(a)
        count = [0] * base
        for value in a:
            count[(value // exp) % base] += 1
        # Prefix sums turn digit counts into end positions.
        for d in range(1, base):
            count[d] += count[d - 1]
        # Walk backwards so equal digits keep their order (stability).
        for i in range(len(a) - 1, -1, -1):
            digit = (a[i] // exp) % base
            out[count[digit] - 1] = a[i]
            count[digit] -= 1
        a[:] = out
        exp *= base
############## Begin Here ###############
# Smoke-test radixSort once on a small random list before benchmarking.
a = np.random.randint(101, size=128).tolist()
radixSort(a)
print('true')

SIZES = (128, 1024, 4096, 16384)
TRIALS = 100


def _bench(sort_fn):
    """Return the mean wall-clock time of sort_fn over TRIALS random lists per size."""
    means = []
    for size in SIZES:
        print(size)
        elapsed = np.empty(TRIALS)
        for trial in range(TRIALS):
            data = np.random.randint(101, size=size).tolist()
            t1 = timeit.default_timer()
            sort_fn(data)
            elapsed[trial] = timeit.default_timer() - t1
        means.append(elapsed.mean())
    return means


# Bug fixes vs. the original script: quickSort was called as quickSort(a)
# (it requires low/high bounds) and radixSort as radixSort(a, 10, 100)
# (it only accepted one argument) -- both raised TypeError. The five nearly
# identical copy-pasted timing blocks are consolidated into one helper.
curves = (
    ('Insertion Sort', 'blue', _bench(insertionSort)),
    ('Counting Sort', 'red', _bench(lambda xs: countingSort(xs, 100))),
    ('Quick Sort', 'green', _bench(lambda xs: quickSort(xs, 0, len(xs) - 1))),
    ('Merge Sort', 'yellow', _bench(mergeSort)),
    ('Radix Sort', 'black', _bench(radixSort)),
)
for label, color, means in curves:
    plt.plot(list(SIZES), means, c=color, label=label, linestyle='--', linewidth=2)
plt.axis([0, 18000, 0, .0005])
plt.show()
| 3.84375 | 4 |
Barrier.py | yunfanz/ReionBub | 1 | 12771179 | <filename>Barrier.py
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
import numpy as np
from IO_utils import *
import matplotlib.pyplot as plt
import matplotlib, seaborn as sns
from scipy import ndimage, interpolate
from tocmfastpy import *
from skimage import measure, morphology, segmentation
from Choud14 import *
import pandas as pd
import optparse
def rescale(image, factor):
    # Resample `image` by `factor` along every axis using nearest-neighbour
    # interpolation (scipy.ndimage). Returns (new_image, real_factor), where
    # real_factor is the per-axis factor actually applied after the target
    # shape has been rounded to whole voxels.
    new_real_shape = np.asarray(image.shape) * factor
    new_shape = np.round(new_real_shape)
    real_factor = new_shape / np.asarray(image.shape)
    print real_factor
    new_image = ndimage.interpolation.zoom(image, real_factor, mode='nearest')
    return new_image, real_factor
if __name__=="__main__":
    # Command-line options: 21cmFast box directory, watershed NPZ directory,
    # and whether the density field should be treated as already linear.
    o = optparse.OptionParser()
    o.add_option('-d','--dir', dest='DIR', default='/home/yunfanz/Data/21cmFast/Boxes/')
    o.add_option('-z','--npz', dest='NPZDIR', default='./NPZ/')
    o.add_option('-l','--lin', dest='LIN', action="store_true")
    (opts, args) = o.parse_args()
    z = 12.00
    #npzfile = './NPZ/dwatershed_z{}.npz'.format(z)
    wspattern = 'dwatershed_z{}_L*.npz'.format(z)
    npzfiles = find_files(opts.NPZDIR, pattern=wspattern)
    #deltax_files = find_deltax(opts.DIR, z)
    dframes = []
    for i, npzfile in enumerate(npzfiles):
        npz_params = boxio.parse_filename(npzfile)
        # Only process full-size (512^3) boxes.
        if npz_params['BoxSize'] != 512:
            continue
        data = np.load(npzfile)
        labels = data['labels']
        scale = data['scale']
        # Relabel connected regions with full 3-D connectivity.
        labels = measure.label(labels, connectivity=3)
        #looking for matching deltax file
        #for file in deltax_files:
        #    if (boxio.parse_filename(file)['BoxSize'] == npz_params['BoxSize']) and (boxio.parse_filename(file)['Iteration'] == npz_params['Iteration']):
        #        deltax_file = file
        #        print "Found matching files:", npzfile, deltax_file
        #        break
        # The density box is taken from the command line instead.
        deltax_file = args[0]
        try:
            deltax_image = np.load(deltax_file)
            p_dict = boxio.parse_filename(deltax_file)
        except:
            # Fall back to the raw 21cmFast binary box format.
            b1 = boxio.readbox(deltax_file)
            deltax_image = b1.box_data
            p_dict = b1.param_dict
        #deltax_image = rescale(deltax_image, 0.5)
        # Per-region statistics (size, mean overdensity) of each bubble.
        R = measure.regionprops(labels, intensity_image=deltax_image)
        print len(R)
        if len(R)> 20000:
            R = R[:20000]
        ES = ESets(z=z)
        # Effective radius of each region in box units.
        RE = np.asarray([r.equivalent_diameter/2 for r in R])/scale
        #R0L = RE
        deltax = np.asarray([r.mean_intensity for r in R])
        # Convert non-linear overdensity back to linear via the growth factor.
        if not opts.LIN:
            deltax /= ES.fgrowth
        dx = 1/scale
        L = p_dict['BoxSize']
        df = pd.DataFrame({'RE':RE, 'deltax':deltax, 'BoxSize': L})
        #import IPython; IPython.embed()
        #df = df.loc[df['RE'] > max(5*dx, ES.R0min/2)] # 2 is arbitrary, we really want to compare R0L as below
        #df = df.loc[df['RE'] < L/10]
        if len(df.index) == 0: continue
        try:
            df['R0L'] = ES.R0(df['RE'])
        except(ValueError):
            print 'Below interpolation range, use RE'
            df['R0L'] = df['RE']
        df = df.loc[df['R0L'] > ES.R0min]
        # Variance of the density field smoothed on scale R0L.
        df['S'] = sig0(df['R0L'])
        dframes.append(df)
    df = pd.concat(dframes)
    SL = np.linspace(0, 1.1*np.amax(df['S']), 100)
    # Analytic FZH04 barrier, plotted over the measured (S, deltax) cloud.
    bfzh = BFZH(SL,ES.deltac,ES.smin,ES.K)
    #import IPython; IPython.embed()
    sns.jointplot(x='S', y="deltax", data=df, kind="reg")
    plt.plot(SL, bfzh, 'r', linewidth=3)
    #sns.regplot('S','deltax', df)#, scatter_kws={'hue': "BoxSize"})
    plt.show()
| 1.882813 | 2 |
shops/Shop.py | monoxacc/GPU_Crawler | 0 | 12771180 | from .Const import Const
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
class Shop():
    """Abstract base class for a single web-shop GPU crawler.

    Subclasses implement getDomain / handlePyResponse / handleSeleniumResponse;
    this base drives the two-stage fetch (plain HTTP first, Selenium fallback)
    and provides chip-name classification of product titles.
    """
    CRAWLER_DRIVER_PATH = Const.CRAWLER_DRIVER_PATH
    reqUserAgent = Const.UserAgent
    # NOTE(review): mutable class-level default; getProducts() rebinds it as an
    # instance attribute, but until then the list is shared by all instances.
    products = []
    seleniumWebDriver = None

    def __init__(self):
        """Start one headless Chrome instance reused for all requests."""
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')
        options.add_argument('--no-proxy-server')
        options.add_argument("--window-position=-700,0")
        options.add_argument("--window-size=576,1024")
        # Images are not needed for scraping -- disabling them speeds up loads.
        options.add_argument('--blink-settings=imagesEnabled=false')
        # options.add_argument('user-data-dir=' + chrome_profile_path)
        self.seleniumWebDriver = webdriver.Chrome(service=Service(Const.CRAWLER_DRIVER_PATH), options=options)
        return

    def getDomain(self) -> str:
        """Return the shop's domain; must be overridden by subclasses."""
        raise NotImplementedError('Override this function in derived class!')

    def handlePyResponse(self, resp) -> bool:
        """Parse a plain-requests response body; must be overridden."""
        raise NotImplementedError('Override this function in derived class!')

    def handleSeleniumResponse(self, webdriver) -> bool:
        """Parse the Selenium-rendered page; must be overridden."""
        raise NotImplementedError('Override this function in derived class!')

    def getProducts(self, url):
        """Fetch `url` and return the product list filled in by the subclass.

        Tries a cheap HTTP GET first; falls back to a full Selenium render
        when the subclass handler reports failure.
        """
        print("▶ Working on: 🌎 %s" % url)
        self.products = []
        if resp := self.execPyRequest(url): # first, try with py requests lib
            if self.handlePyResponse(resp):
                return self.products # return products set by the derived classes
        if resp := self.execSeleniumRequest(url): # try with Selenium
            if self.handleSeleniumResponse(self.seleniumWebDriver):
                return self.products # return products set by the derived classes
        return self.products

    def execPyRequest(self, url) -> str:
        """GET `url` with the requests library; return the body text or None."""
        print("▶ Trying with PyRequestsLib...", end=" ")
        try:
            response = requests.get(url, headers = Const.UserAgent)
            print("HTTP %s" % (response.status_code))
            if response.status_code != 200:
                return None
            with open('debug-last-resp.log', 'a') as debug_file: ### DEBUG
                debug_file.write(response.text) ### DEBUG
            return response.text
        except Exception as e:
            print("☠ Got a problem: %s" % e)
            return None

    def execSeleniumRequest(self, url) -> str:
        """Load `url` in headless Chrome; return the rendered source or None."""
        print("▶ Trying with Selenium..." , end=" ")
        try:
            self.seleniumWebDriver.get(url)
            with open('debug-last-resp.log', 'a') as debug_file: ### DEBUG
                debug_file.write(self.seleniumWebDriver.page_source) ### DEBUG
            return self.seleniumWebDriver.page_source ###not used currently
        except Exception as e:
            print("☠ Got a problem: %s" % e)
            return None

    def close(self):
        """Shut down the Selenium driver and its browser process."""
        self.seleniumWebDriver.close()
        self.seleniumWebDriver.quit()

    def seleniumGetSourceByWaitForClass(self, expected_elem, delay=5):
        """Return the page source once an element with CSS class `expected_elem`
        is present, waiting up to `delay` seconds; None on timeout."""
        soup = BeautifulSoup(self.seleniumWebDriver.page_source, features="html.parser")
        if soup.findAll(attrs={'class': expected_elem}).__len__() == 0:
            try: # page possibly needs some time to load up - so wait little bit
                element_present = ec.presence_of_element_located((By.CLASS_NAME, expected_elem))
                print("Waiting for element \"%s\"..." % expected_elem)
                WebDriverWait(self.seleniumWebDriver, delay).until(element_present)
            except TimeoutException:
                print("Expected element \"%s\" not found!" % expected_elem)
                return None
        return self.seleniumWebDriver.page_source

    def getChipName(self, name):
        """Classify a product title into a chip-name constant (or "N/A").

        NOTE(review): the "Ti "/"XT " checks require a trailing space, so a
        title *ending* in "...Ti" is classified as the non-Ti chip -- confirm
        whether that is intended.
        """
        chip = "N/A"
        if name.__contains__("3060"):
            if name.__contains__("Ti ") or name.__contains__("TI "):
                chip = Const.CHIP_NAME_3060Ti
            else:
                chip = Const.CHIP_NAME_3060
        elif name.__contains__("3070"):
            if name.__contains__("Ti ") or name.__contains__("TI "):
                chip = Const.CHIP_NAME_3070Ti
            else:
                chip = Const.CHIP_NAME_3070
        elif name.__contains__("3080"):
            if name.__contains__("Ti ") or name.__contains__("TI "):
                chip = Const.CHIP_NAME_3080Ti
            else:
                chip = Const.CHIP_NAME_3080
        elif name.__contains__("3090"):
            if name.__contains__("Ti ") or name.__contains__("TI "):
                chip = Const.CHIP_NAME_3090Ti
            else:
                chip = Const.CHIP_NAME_3090
        elif name.__contains__("6700"):
            if name.__contains__("XT "):
                chip = Const.CHIP_NAME_6700XT
            else:
                chip = Const.CHIP_NAME_6700
        elif name.__contains__("6800"):
            if name.__contains__("XT "):
                chip = Const.CHIP_NAME_6800XT
            else:
                chip = Const.CHIP_NAME_6800
        elif name.__contains__("6900"):
            if name.__contains__("XT "):
                chip = Const.CHIP_NAME_6900XT
            else:
                chip = Const.CHIP_NAME_6900
        return chip
openprocurement/auctions/tessel/tests/document.py | yevheniimoroziuk/openprocurement.auctions.tessel | 0 | 12771181 | # -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.tessel.tests.base import BaseTesselAuctionWebTest
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.core.tests.document import (
AuctionDocumentResourceTestMixin,
AuctionDocumentWithDSResourceTestMixin
)
from openprocurement.auctions.core.tests.blanks.document_blanks import (
# TesselAuctionDocumentWithDSResourceTest
create_auction_document_vdr,
put_auction_document_vdr,
)
from openprocurement.auctions.tessel.tests.blanks.document_blanks import (
patch_auction_document
)
class TesselAuctionDocumentResourceTest(BaseTesselAuctionWebTest, AuctionDocumentResourceTestMixin):
    """Document-resource tests for Tessel auctions without the document service."""
    docservice = False
    test_patch_auction_document = snitch(patch_auction_document)
class TesselAuctionDocumentWithDSResourceTest(TesselAuctionDocumentResourceTest, AuctionDocumentWithDSResourceTestMixin):
    """Same tests, run with the document service (DS) enabled."""
    docservice = True
    test_patch_auction_document = snitch(patch_auction_document)
    # PAS-specific tests inherited from the mixin do not apply here.
    test_create_auction_document_pas = None
    test_put_auction_document_pas = None
def suite():
    """Build the unittest suite covering this module's document tests."""
    tests = unittest.TestSuite()
    for case in (TesselAuctionDocumentResourceTest,
                 TesselAuctionDocumentWithDSResourceTest):
        tests.addTest(unittest.makeSuite(case))
    return tests
# Allow running this module's suite directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 2.109375 | 2 |
algorithm/combinations.py | rishabhiitbhu/hackerrank | 0 | 12771182 | <reponame>rishabhiitbhu/hackerrank
# Sample sequence whose rotations are printed below.
l = [1,2,3,4]
def printCombo(l):
    """Print every left-rotation of `l`, mutating it in place.

    NOTE(review): despite the name, this enumerates rotations of `l`,
    not combinations.
    """
    for _ in range(len(l)):
        # Bubbling the first element to the end == rotate left by one.
        for i in range(len(l) - 1):
            l[i], l[i + 1] = l[i + 1], l[i]
        print(l)
# Print the rotations of every suffix of l.
for i in range(len(l)):
    printCombo(l[i:])
| 3.75 | 4 |
python/astro_metadata_translator/translators/megaprime.py | lsst/astro_metadata_translator | 1 | 12771183 | # This file is part of astro_metadata_translator.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
"""Metadata translation code for CFHT MegaPrime FITS headers"""
__all__ = ("MegaPrimeTranslator", )
import re
import posixpath
from astropy.io import fits
from astropy.coordinates import EarthLocation, Angle
import astropy.units as u
from ..translator import cache_translation, CORRECTIONS_RESOURCE_ROOT
from .fits import FitsTranslator
from .helpers import tracking_from_degree_headers, altaz_from_degree_headers
class MegaPrimeTranslator(FitsTranslator):
    """Metadata translator for CFHT MegaPrime standard headers.
    """

    name = "MegaPrime"
    """Name of this translation class"""

    supported_instrument = "MegaPrime"
    """Supports the MegaPrime instrument."""

    default_resource_root = posixpath.join(CORRECTIONS_RESOURCE_ROOT, "CFHT")
    """Default resource path root to use to locate header correction files."""

    # CFHT Megacam has no rotator, and the instrument angle on sky is set to
    # +Y=N, +X=W which we define as a 0 degree rotation.
    _const_map = {"boresight_rotation_angle": Angle(0*u.deg),
                  "boresight_rotation_coord": "sky",
                  "detector_group": None}

    # Direct header-card to property mappings; list values are tried in order
    # to cope with header variations across data epochs.
    _trivial_map = {"physical_filter": "FILTER",
                    "dark_time": ("DARKTIME", dict(unit=u.s)),
                    "exposure_time": ("EXPTIME", dict(unit=u.s)),
                    "observation_id": "OBSID",
                    "object": "OBJECT",
                    "science_program": "RUNID",
                    "exposure_id": "EXPNUM",
                    "visit_id": "EXPNUM",
                    "detector_serial": "CCDNAME",
                    "relative_humidity": ["RELHUMID", "HUMIDITY"],
                    "temperature": (["TEMPERAT", "AIRTEMP"], dict(unit=u.deg_C)),
                    "boresight_airmass": ["AIRMASS", "BORE-AIRMASS"]}

    @cache_translation
    def to_datetime_begin(self):
        # Docstring will be inherited. Property defined in properties.py
        # We know it is UTC
        value = self._from_fits_date_string(self._header["DATE-OBS"],
                                            time_str=self._header["UTC-OBS"], scale="utc")
        self._used_these_cards("DATE-OBS", "UTC-OBS")
        return value

    @cache_translation
    def to_datetime_end(self):
        # Docstring will be inherited. Property defined in properties.py
        # Older files are missing UTCEND
        if self.is_key_ok("UTCEND"):
            # We know it is UTC
            value = self._from_fits_date_string(self._header["DATE-OBS"],
                                                time_str=self._header["UTCEND"], scale="utc")
            self._used_these_cards("DATE-OBS", "UTCEND")
        else:
            # Take a guess by adding on the exposure time
            value = self.to_datetime_begin() + self.to_exposure_time()
        return value

    @cache_translation
    def to_location(self):
        """Calculate the observatory location.

        Returns
        -------
        location : `astropy.coordinates.EarthLocation`
            An object representing the location of the telescope.
        """
        # Height is not in some MegaPrime files. Use the value from
        # EarthLocation.of_site("CFHT")
        # Some data uses OBS-LONG, OBS-LAT, other data uses LONGITUD and
        # LATITUDE
        for long_key, lat_key in (("LONGITUD", "LATITUDE"), ("OBS-LONG", "OBS-LAT")):
            if self.are_keys_ok([long_key, lat_key]):
                value = EarthLocation.from_geodetic(self._header[long_key], self._header[lat_key], 4215.0)
                self._used_these_cards(long_key, lat_key)
                break
        else:
            # Neither key pair present: fall back to the site database.
            value = EarthLocation.of_site("CFHT")

        return value

    @cache_translation
    def to_detector_name(self):
        # Docstring will be inherited. Property defined in properties.py
        if self.is_key_ok("EXTNAME"):
            name = self._header["EXTNAME"]
            # Only valid name has form "ccdNN"
            if re.match(r"ccd\d+$", name):
                self._used_these_cards("EXTNAME")
                return name

        # Dummy value, intended for PHU (need something to get filename)
        return "ccd99"

    @cache_translation
    def to_detector_num(self):
        """Return the integer detector index parsed from the ``ccdNN`` name."""
        name = self.to_detector_name()
        return int(name[3:])

    @cache_translation
    def to_observation_type(self):
        """Calculate the observation type.

        Returns
        -------
        typ : `str`
            Observation type. Normalized to standard set.
        """
        obstype = self._header["OBSTYPE"].strip().lower()
        self._used_these_cards("OBSTYPE")
        # CFHT uses "object" where the standard vocabulary says "science".
        if obstype == "object":
            return "science"
        return obstype

    @cache_translation
    def to_tracking_radec(self):
        """Calculate the tracking RA/Dec for this observation.

        Currently will be `None` for geocentric apparent coordinates.
        Additionally, can be `None` for non-science observations.

        The method supports multiple versions of header defining tracking
        coordinates.

        Returns
        -------
        coords : `astropy.coordinates.SkyCoord`
            The tracking coordinates.
        """
        radecsys = ("RADECSYS", "OBJRADEC", "RADESYS")
        radecpairs = (("RA_DEG", "DEC_DEG"), ("BORE-RA", "BORE-DEC"))
        return tracking_from_degree_headers(self, radecsys, radecpairs)

    @cache_translation
    def to_altaz_begin(self):
        # Docstring will be inherited. Property defined in properties.py
        return altaz_from_degree_headers(self, (("TELALT", "TELAZ"), ("BORE-ALT", "BORE-AZ")),
                                         self.to_datetime_begin())

    @cache_translation
    def to_detector_exposure_id(self):
        # Docstring will be inherited. Property defined in properties.py
        # Combine exposure and detector number into one unique id; the factor
        # of 36 leaves room for every detector slot of an exposure.
        return self.to_exposure_id() * 36 + self.to_detector_num()

    @cache_translation
    def to_pressure(self):
        # Docstring will be inherited. Property defined in properties.py
        # Can be either AIRPRESS in Pa or PRESSURE in mbar
        for key, unit in (("PRESSURE", u.hPa), ("AIRPRESS", u.Pa)):
            if self.is_key_ok(key):
                return self.quantity_from_card(key, unit)
        else:
            raise KeyError(f"{self._log_prefix}: Could not find pressure keywords in header")

    @cache_translation
    def to_observation_counter(self):
        """Return the lifetime exposure number.

        Returns
        -------
        sequence : `int`
            The observation counter.
        """
        return self.to_exposure_id()

    @classmethod
    def determine_translatable_headers(cls, filename, primary=None):
        """Given a file return all the headers usable for metadata translation.

        MegaPrime files are multi-extension FITS with a primary header and
        each detector stored in a subsequent extension.  MegaPrime uses
        ``INHERIT=F`` therefore the primary header will always be ignored
        if given.

        Parameters
        ----------
        filename : `str`
            Path to a file in a format understood by this translator.
        primary : `dict`-like, optional
            The primary header obtained by the caller. This is sometimes
            already known, for example if a system is trying to bootstrap
            without already knowing what data is in the file. Will be
            ignored.

        Yields
        ------
        headers : iterator of `dict`-like
            Each detector header in turn. The supplied header will never be
            included.

        Notes
        -----
        This translator class is specifically tailored to raw MegaPrime data
        and is not designed to work with general FITS files. The normal
        paradigm is for the caller to have read the first header and then
        called `determine_translator()` on the result to work out which
        translator class to then call to obtain the real headers to be used for
        translation.
        """
        # Since we want to scan many HDUs we use astropy directly to keep
        # the file open rather than continually opening and closing it
        # as we go to each HDU.
        with fits.open(filename) as fits_file:
            for hdu in fits_file:
                # Astropy <=4.2 strips the EXTNAME header but some CFHT data
                # have two EXTNAME headers and the CCD number is in the
                # second one.
                if hdu.name == "PRIMARY":
                    continue

                if hdu.name.startswith("ccd"):
                    # It may only be some data files that are broken so
                    # handle the expected form.
                    yield hdu.header
                    continue

                # Some test data at least has the EXTNAME as
                # COMPRESSED_IMAGE but the EXTVER as the detector number.
                if hdu.name == "COMPRESSED_IMAGE":
                    header = hdu.header
                    # Astropy strips EXTNAME so put it back for the translator
                    header["EXTNAME"] = f"ccd{hdu.ver:02d}"
                    yield header
| 1.984375 | 2 |
list1/task1/main.py | ErykKrupa/metaheuristic-algorithms | 0 | 12771184 | <filename>list1/task1/main.py
from time import time
import numpy as np
def local_search(function, neighbour_function, max_execution_time):
    """First-improvement local search minimizing `function`.

    Starts from a random 4-D point and repeatedly moves to the first
    strictly better neighbour produced by `neighbour_function`, stopping
    at a local optimum or when `max_execution_time` seconds elapse.
    """
    deadline = time() + max_execution_time
    current = _get_naive_solution()
    while time() < deadline:
        current_value = function(current)
        improved = next(
            (n for n in neighbour_function(current) if function(n) < current_value),
            None)
        if improved is None:
            return current  # local optimum: no neighbour is strictly better
        current = improved
    return current
def _get_naive_solution():
return list(np.random.uniform(-100, 100, 4))
def _happy_cat(x):
norm = np.linalg.norm(x)
return ((norm - 4) ** 2) ** 0.125 + 1 / 4 * (0.5 * norm ** 2 + sum((x_i for x_i in x))) + 1 / 2
def _get_neighbour_happy_cat(component):
return component + np.random.uniform(-1, 1)
def _griewank(x):
return 1 + 1 / 4000 * sum((x_i ** 2 for x_i in x)) - np.product(
[np.cos(x_i / np.sqrt(i)) for i, x_i in enumerate(x, 1)])
def _get_neighbour_griewank(component):
return component + abs(component) * np.random.uniform(-1, 1) * 2
def _get_neighbourhood(solution, neighbour_function, number_of_neighbours):
return [[neighbour_function(item) for item in solution] for _ in range(number_of_neighbours)]
if __name__ == '__main__':
    # stdin: "<time_budget_seconds> <benchmark>"; benchmark '0' selects
    # Happy Cat, anything else selects Griewank.
    t, b = input().split()
    t = float(t)
    best = local_search(_happy_cat if b == '0' else _griewank,
                        lambda solution: _get_neighbourhood(solution, _get_neighbour_happy_cat if b == '0'
                        else _get_neighbour_griewank, 500), t)
    # Output: the four coordinates followed by the objective value.
    print(f'{best[0]} {best[1]} {best[2]} {best[3]} {(_happy_cat if b == "0" else _griewank)(best)}')
| 3.265625 | 3 |
apps/pretrained_protein/tape/data_process.py | Noisyntrain/PaddleHelix | 2 | 12771185 | <filename>apps/pretrained_protein/tape/data_process.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multiple protein datasets.
"""
import json
import numpy as np
import sys
from pahelix.utils.protein_tools import ProteinTokenizer
class Pfam(object):
    """Accumulator for the Pfam pretraining dataset (sequence tokens only).

    For more details, please check paper "Evaluating Protein Transfer
    Learning with TAPE".
    """
    def __init__(self):
        self.tokenizer = ProteinTokenizer()
        self.clear()

    def gen_sequence_data(self, data):
        """Tokenize one record's amino-acid string into token ids."""
        return self.tokenizer.gen_token_ids(data['amino_acids'])

    def append(self, data):
        """Tokenize `data` and accumulate its tokens and length."""
        tokens = self.gen_sequence_data(data)
        self.token_ids += tokens
        self.lengths.append(len(tokens))

    def clear(self):
        """Reset all accumulated sequences."""
        self.token_ids = []
        self.lengths = []

    def save_npz(self, filename):
        """Write the accumulated arrays to `filename` in npz format."""
        np.savez('%s' % filename,
                 token_ids=np.array(self.token_ids, dtype='int8'),
                 lengths=np.array(self.lengths, dtype='int64'))
class SecondaryStructure(object):
    """Accumulator for the TAPE secondary-structure dataset.

    For more details, please check paper "Evaluating Protein Transfer
    Learning with TAPE".
    """
    def __init__(self):
        self.tokenizer = ProteinTokenizer()
        self.clear()

    def gen_sequence_data(self, data):
        """Return (token_ids, ss3 labels, ss8 labels) for one record.

        Labels are zero-padded at both ends (presumably to line up with the
        tokenizer's start/end tokens -- confirm against ProteinTokenizer).
        """
        token_ids = self.tokenizer.gen_token_ids(data['amino_acids'])
        labels3 = [0] + data['ss3'] + [0]
        labels8 = [0] + data['ss8'] + [0]
        return token_ids, labels3, labels8

    def append(self, data):
        """Tokenize `data` and accumulate tokens, labels and length."""
        token_ids, labels3, labels8 = self.gen_sequence_data(data)
        self.token_ids += token_ids
        self.labels3 += labels3
        self.labels8 += labels8
        self.lengths.append(len(token_ids))

    def clear(self):
        """Reset all accumulated records."""
        self.token_ids = []
        self.labels3 = []
        self.labels8 = []
        self.lengths = []

    def save_npz(self, filename):
        """Write the accumulated arrays to `filename` in npz format."""
        np.savez('%s' % filename,
                 token_ids=np.array(self.token_ids, dtype='int8'),
                 labels3=np.array(self.labels3, dtype='int8'),
                 labels8=np.array(self.labels8, dtype='int8'),
                 lengths=np.array(self.lengths, dtype='int64'))
class RemoteHomology(object):
    """Class for the remote homology dataset.

    For more details, please check paper "Evaluating Protein Transfer
    Learning with TAPE".

    Fixes vs. the original:
    - ``append`` called ``self.labels.extend(labels)`` on a *scalar* fold
      label (``gen_sequence_data`` returns a single ``fold_label`` value),
      which raises TypeError; the label is now appended.
    - labels were saved as int8, but TAPE fold ids exceed the int8 range
      (over a thousand fold classes); they are saved as int64.
    """
    def __init__(self):
        self.tokenizer = ProteinTokenizer()
        self.clear()

    def gen_sequence_data(self, data):
        """Return (token_ids, fold_label) for one record."""
        amino_acids = data['amino_acids']
        token_ids = self.tokenizer.gen_token_ids(amino_acids)
        label = data['fold_label']
        return token_ids, label

    def append(self, data):
        """Append one record's tokens and its single fold label."""
        token_ids, label = self.gen_sequence_data(data)
        self.token_ids.extend(token_ids)
        self.labels.append(label)
        self.lengths.append(len(token_ids))

    def clear(self):
        """Clear data."""
        self.token_ids = []
        self.labels = []
        self.lengths = []

    def save_npz(self, filename):
        """Save data to npz format file."""
        np.savez('%s' % filename,
                 token_ids=np.array(self.token_ids, dtype='int8'),
                 labels=np.array(self.labels, dtype='int64'),
                 lengths=np.array(self.lengths, dtype='int64'))
class Fluorescence(object):
    """Class for the fluorescence dataset.

    For more details, please check paper "Evaluating Protein Transfer
    Learning with TAPE".

    Fixes vs. the original:
    - ``append`` called ``self.labels.extend(labels)`` on a *scalar*
      regression target (``log_fluorescence``), which raises TypeError;
      the label is now appended.
    - the float targets were saved as int8, which truncates them; they are
      saved as float32.
    """
    def __init__(self):
        self.tokenizer = ProteinTokenizer()
        self.clear()

    def gen_sequence_data(self, data):
        """Return (token_ids, log_fluorescence) for one record."""
        amino_acids = data['amino_acids']
        label = data['log_fluorescence']
        token_ids = self.tokenizer.gen_token_ids(amino_acids)
        return token_ids, label

    def append(self, data):
        """Append one record's tokens and its single fluorescence label."""
        token_ids, label = self.gen_sequence_data(data)
        self.token_ids.extend(token_ids)
        self.labels.append(label)
        self.lengths.append(len(token_ids))

    def clear(self):
        """Clear data."""
        self.token_ids = []
        self.labels = []
        self.lengths = []

    def save_npz(self, filename):
        """Save data to npz format file."""
        np.savez('%s' % filename,
                 token_ids=np.array(self.token_ids, dtype='int8'),
                 labels=np.array(self.labels, dtype='float32'),
                 lengths=np.array(self.lengths, dtype='int64'))
class Stability(object):
    """Accumulator for the TAPE stability regression dataset.

    Collects tokenized amino-acid sequences and their stability scores,
    then serializes everything into flat numpy arrays.
    See "Evaluating Protein Transfer Learning with TAPE" for details.
    """

    def __init__(self):
        self.tokenizer = ProteinTokenizer()
        self.clear()

    def gen_sequence_data(self, data):
        """Return (token_ids, stability_score) for one raw record."""
        target = data['stability_score']
        ids = self.tokenizer.gen_token_ids(data['amino_acids'])
        return ids, target

    def append(self, data):
        """Tokenize one record and fold it into the flat buffers."""
        ids, lbls = self.gen_sequence_data(data)
        self.token_ids.extend(ids)
        self.labels.extend(lbls)
        self.lengths.append(len(ids))

    def clear(self):
        """Drop everything accumulated so far."""
        self.token_ids = []
        self.labels = []
        self.lengths = []

    def save_npz(self, filename):
        """Write the flat buffers to ``filename`` in npz format.

        NOTE(review): labels use int8 although stability scores are
        real-valued — confirm this truncation is intentional.
        """
        np.savez('%s' % filename,
                 token_ids=np.array(self.token_ids, dtype='int8'),
                 labels=np.array(self.labels, dtype='int8'),
                 lengths=np.array(self.lengths, dtype='int64'))
if __name__ == '__main__':
    # Build the secondary-structure dataset from line-delimited JSON
    # records in 'raw_data' and dump the flattened arrays to 'data.npz'.
    dataset = SecondaryStructure()
    with open('raw_data', 'r') as fin:
        for line in fin:
            data = json.loads(line)
            dataset.append(data)
    dataset.save_npz('data')
| 2.46875 | 2 |
app/emotions/serializers.py | adiptdevtomar/emotions_backend | 0 | 12771186 | from rest_framework import serializers
from emotions import models
class EmotionsSerializer(serializers.ModelSerializer):
class Meta:
model = models.EmotionModel
fields = ['text'] | 2.0625 | 2 |
utils/HFUT/HFUTLog.py | Karry-ok/hfut-check-in | 7 | 12771187 | import logging
from rich.logging import RichHandler
from rich.traceback import install
# Pretty tracebacks via rich; show only the innermost frame.
install(max_frames=1)
FORMAT = '%(message)s'
# Route all stdlib logging through RichHandler so records are colorized
# and exceptions get rich-rendered tracebacks.
logging.basicConfig(
    level='INFO',
    format=FORMAT,
    datefmt='[%X]',
    handlers=[RichHandler(rich_tracebacks=True)]
)
log = logging.getLogger('rich')
temboardagent/plugins/monitoring/__init__.py | pierrehilbert/temboard-agent | 0 | 12771188 | <reponame>pierrehilbert/temboard-agent
import time
import os
import logging
import json
try:
from urllib2 import HTTPError
except ImportError:
from urllib.error import HTTPError
from temboardagent.toolkit import taskmanager
from temboardagent.routing import RouteSet
from temboardagent.toolkit.configuration import OptionSpec
from temboardagent.toolkit.validators import file_, commalist
from temboardagent.queue import Queue, Message
from temboardagent.tools import now
from temboardagent.inventory import SysInfo
from temboardagent import __version__ as __VERSION__
from temboardagent.errors import UserError
from .inventory import host_info, instance_info
from .probes import (
load_probes,
probe_bgwriter,
probe_blocks,
probe_cpu,
probe_db_size,
probe_filesystems_size,
probe_loadavg,
probe_locks,
probe_memory,
probe_process,
probe_replication,
probe_sessions,
probe_tblspc_size,
probe_wal_files,
probe_xacts,
run_probes,
)
from .output import send_output, remove_passwords
logger = logging.getLogger(__name__)
workers = taskmanager.WorkerSet()
routes = RouteSet(prefix=b'/monitoring/probe')
# HTTP GET endpoints, one per probe. Each handler runs the matching probe
# against the locally configured Postgres instance via api_run_probe() and
# returns its rows; none of them inspect http_context.
@routes.get(b'/sessions')
def get_probe_sessions(http_context, app):
    return api_run_probe(probe_sessions(app.config.monitoring), app.config)
@routes.get(b'/xacts')
def get_probe_xacts(http_context, app):
    return api_run_probe(probe_xacts(app.config.monitoring), app.config)
@routes.get(b'/locks')
def get_probe_locks(http_context, app):
    return api_run_probe(probe_locks(app.config.monitoring), app.config)
@routes.get(b'/blocks')
def get_probe_blocks(http_context, app):
    return api_run_probe(probe_blocks(app.config.monitoring), app.config)
@routes.get(b'/bgwriter')
def get_probe_bgwriter(http_context, app):
    return api_run_probe(probe_bgwriter(app.config.monitoring), app.config)
@routes.get(b'/db_size')
def get_probe_db_size(http_context, app):
    return api_run_probe(probe_db_size(app.config.monitoring), app.config)
@routes.get(b'/tblspc_size')
def get_probe_tblspc_size(http_context, app):
    return api_run_probe(probe_tblspc_size(app.config.monitoring), app.config)
@routes.get(b'/filesystems_size')
def get_probe_filesystems_size(http_context, app):
    return api_run_probe(probe_filesystems_size(app.config.monitoring),
                         app.config)
@routes.get(b'/cpu')
def get_probe_cpu(http_context, app):
    return api_run_probe(probe_cpu(app.config.monitoring), app.config)
@routes.get(b'/process')
def get_probe_process(http_context, app):
    return api_run_probe(probe_process(app.config.monitoring), app.config)
@routes.get(b'/memory')
def get_probe_memory(http_context, app):
    return api_run_probe(probe_memory(app.config.monitoring), app.config)
@routes.get(b'/loadavg')
def get_probe_loadavg(http_context, app):
    return api_run_probe(probe_loadavg(app.config.monitoring), app.config)
@routes.get(b'/wal_files')
def get_probe_wal_files(http_context, app):
    return api_run_probe(probe_wal_files(app.config.monitoring), app.config)
@routes.get(b'/replication')
def get_probe_replication(http_context, app):
    return api_run_probe(probe_replication(app.config.monitoring), app.config)
def api_run_probe(probe_instance, config):
    """
    Run a single probe instance against the configured Postgres instance
    and return its rows (no delta computation between runs).
    """
    # NOTE(review): `<PASSWORD>` looks like a scrubbed credential reference
    # (presumably config.postgresql.password) — restore before running.
    conninfo = dict(
        host=config.postgresql.host,
        port=config.postgresql.port,
        user=config.postgresql.user,
        database=config.postgresql.dbname,
        password=<PASSWORD>,
        dbnames=config.monitoring.dbnames,
        instance=config.postgresql.instance,
    )
    # Validate connection information from the config, and ensure
    # the instance is available
    sysinfo = SysInfo()
    hostname = sysinfo.hostname(config.temboard.hostname)
    instance = instance_info(conninfo, hostname)
    # Set home path
    probe_instance.set_home(config.temboard.home)
    # Gather the data from probes
    return run_probes([probe_instance], [instance], delta=False)
@workers.register(pool_size=1)
def monitoring_collector_worker(app):
    """
    Run all enabled probes and push the collected metrics, JSON-encoded,
    onto the on-disk queue consumed by monitoring_sender_worker.
    """
    logger.debug("Starting monitoring collector")
    config = app.config
    # NOTE(review): `<PASSWORD>` looks like a scrubbed credential reference
    # (presumably config.postgresql.password) — restore before running.
    conninfo = dict(
        host=config.postgresql.host,
        port=config.postgresql.port,
        user=config.postgresql.user,
        database=config.postgresql.dbname,
        password=<PASSWORD>,
        dbnames=config.monitoring.dbnames,
        instance=config.postgresql.instance,
    )
    system_info = host_info(config.temboard.hostname)
    # Load the probes to run
    probes = load_probes(config.monitoring, config.temboard.home)
    instance = instance_info(conninfo, system_info['hostname'])
    logger.debug("Running probes")
    # Gather the data from probes
    data = run_probes(probes, [instance])
    # Prepare and send output; passwords are stripped from the payload.
    output = dict(
        datetime=now(),
        hostinfo=system_info,
        instances=remove_passwords([instance]),
        data=data,
        version=__VERSION__,
    )
    logger.debug(output)
    # Bounded on-disk queue; 'slide' drops oldest entries on overflow.
    q = Queue(os.path.join(config.temboard.home, 'metrics.q'),
              max_size=1024 * 1024 * 10, overflow_mode='slide')
    q.push(Message(content=json.dumps(output)))
    logger.debug("Done")
@workers.register(pool_size=1)
def monitoring_sender_worker(app):
    """Drain the metrics queue and POST each payload to the collector.

    Processes at most ~61 messages per run (the ``c`` counter) and sleeps
    0.5s between messages to smooth the load.  Messages are only deleted
    from the queue after a successful send, except HTTP 409 (integrity
    conflict) which is dropped silently.
    """
    config = app.config
    c = 0
    logger.debug("Starting sender")
    q = Queue(os.path.join(config.temboard.home, 'metrics.q'),
              max_size=1024 * 1024 * 10, overflow_mode='slide')
    while True:
        # Let's do it smoothly..
        time.sleep(0.5)
        # Peek without deleting; deletion happens after a confirmed send.
        msg = q.shift(delete=False)
        if msg is None:
            # If we get nothing from the queue then we get out from this while
            # loop.
            break
        try:
            # Try to send data to temboard collector API
            logger.debug("Trying to send data to collector")
            logger.debug(config.monitoring.collector_url)
            logger.debug(msg.content)
            send_output(
                config.monitoring.ssl_ca_cert_file,
                config.monitoring.collector_url,
                config.temboard.key,
                msg.content
            )
        except HTTPError as e:
            # On error 409 (DB Integrity) we just drop the message and move to
            # the next message.
            if int(e.code) == 409:
                continue
            try:
                data = e.read()
                data = json.loads(data)
                message = data['error']
            except Exception as e:
                # NOTE(review): this rebinds `e`, so str(e) below is the
                # secondary parsing error, not the original HTTPError —
                # confirm that is intended.
                logger.debug("Can't get error details: %s", e)
                message = str(e)
            logger.error("Failed to send data to collector: %s", message)
            logger.error("You should find details in temBoard UI logs.")
            raise Exception("Failed to send data to collector.")
        # If everything's fine then remove current msg from the queue
        # Integrity check is made using check_msg
        q.shift(delete=True, check_msg=msg)
        if c > 60:
            break
        c += 1
    logger.debug("Done")
class MonitoringPlugin(object):
    """temBoard agent plugin wiring the monitoring routes and workers.

    Registers the probe HTTP routes plus the collector/sender workers on
    load, and tears everything down on unload.
    """
    # Postgres 9.4 is the oldest version the probes support.
    PG_MIN_VERSION = 90400
    s = 'monitoring'
    option_specs = [
        OptionSpec(s, 'dbnames', default='*', validator=commalist),
        OptionSpec(s, 'scheduler_interval', default=60, validator=int),
        OptionSpec(s, 'probes', default='*', validator=commalist),
        OptionSpec(s, 'collector_url', default=OptionSpec.REQUIRED),
        OptionSpec(s, 'ssl_ca_cert_file', default=None, validator=file_),
    ]
    del s
    def __init__(self, app, **kw):
        self.app = app
        self.app.config.add_specs(self.option_specs)
    def load(self):
        """Register routes and schedule workers; reject old Postgres."""
        pg_version = self.app.postgres.fetch_version()
        if pg_version < self.PG_MIN_VERSION:
            msg = "%s is incompatible with Postgres below 9.4" % (
                self.__class__.__name__)
            raise UserError(msg)
        self.app.router.add(routes)
        self.app.worker_pool.add(workers)
        # Both workers re-run on the configured interval.
        workers.schedule(
            id='monitoring_collector',
            redo_interval=self.app.config.monitoring.scheduler_interval,
        )(monitoring_collector_worker)
        workers.schedule(
            id='monitoring_sender',
            redo_interval=self.app.config.monitoring.scheduler_interval,
        )(monitoring_sender_worker)
        self.app.scheduler.add(workers)
    def unload(self):
        """Undo everything load() registered."""
        self.app.scheduler.remove(workers)
        self.app.worker_pool.remove(workers)
        self.app.router.remove(routes)
        self.app.config.remove_specs(self.option_specs)
update.py | forest-snow/clime-ui | 2 | 12771189 | <gh_stars>1-10
"""Update embeddings based on user feedback."""
from ast import literal_eval
import argparse
import csv
from itertools import islice
import json
import logging
import numpy as np
import os
import utils
import torch
import torch.optim as optim
from utils import save_embeds, load_embeds
def flatten(l):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def twod_map(mapping, array):
    """Apply ``mapping`` elementwise to a list of lists, preserving shape."""
    return [list(map(mapping, row)) for row in array]
def reindex(E, K, P, N):
    """Re-index to only use words that change after the update.

    Returns the sliced embedding matrix, the keyword/positive/negative
    index lists remapped into that slice, and the original row indices so
    the caller can scatter the updated rows back into ``E``.
    """
    indices = list(set(K + flatten(P) + flatten(N)))
    E_ = E[indices]
    # Dict gives O(1) remapping; the original indices.index(i) was an O(n)
    # scan per lookup, i.e. quadratic over all feedback words.
    position = {orig: new for new, orig in enumerate(indices)}
    map_to_subset = lambda i: position[i]
    K_ = list(map(map_to_subset, K))
    P_ = twod_map(map_to_subset, P)
    N_ = twod_map(map_to_subset, N)
    return E_, K_, P_, N_, indices
def update(E, K, P, N, reg, n_iter):
    """Refine embeddings with Adam using keyword feedback.

    Only the rows touched by feedback (keywords plus their positive and
    negative words) are optimized; they are written back into ``E`` at the
    end.  The L2 term keeps updated rows close to their original values.

    NOTE(review): minimizing this cost *decreases* dot-product similarity
    to positives and increases it for negatives — confirm the sign
    convention matches the intended objective.
    """
    E_, K_, P_, N_, indices = reindex(E, K, P, N)
    # Snapshot of the pre-update rows for the regularizer.
    E_orig = E_.detach().clone()
    E_.requires_grad = True
    optimizer = optim.Adam([E_])
    for i in range(n_iter):
        optimizer.zero_grad()
        cost = 0
        for k, pk, nk in zip(K_, P_, N_):
            cost += torch.mv(E_[pk], E_[k]).sum() - torch.mv(E_[nk], E_[k]).sum()
        cost += reg * (E_orig - E_).pow(2).sum() # regularizer
        cost.backward()
        optimizer.step()
    # Scatter the optimized rows back into the full matrix.
    E[indices] = E_
    return E
def parse_feedback(feedback_csv, n_keywords):
    """Read user feedback from a CSV file.

    Each row names a keyword plus four Python-literal lists of positive
    and negative words in the source/target languages.  A negative
    ``n_keywords`` means "use every row"; otherwise only the first
    ``n_keywords`` rows are kept.
    """
    parsed = {}
    with open(feedback_csv, 'r') as handle:
        rows = csv.DictReader(handle)
        if n_keywords >= 0:
            rows = islice(rows, n_keywords)
        for row in rows:
            parsed[row['keyword']] = {
                field: literal_eval(row[field])
                for field in ('pos1', 'pos2', 'neg1', 'neg2')
            }
    return parsed
def feedback_to_indices(feedback, words_src, words_tgt):
    """Translate word-level feedback into embedding-row indices.

    Target-language rows live after all source rows in the stacked
    embedding matrix, so target indices are offset by ``len(words_src)``.
    Returns parallel lists: keyword indices, positive index lists, and
    negative index lists.
    """
    shift = len(words_src)
    K, P, N = [], [], []
    for keyword, fb in feedback.items():
        K.append(words_src[keyword])
        positives = [words_src[w] for w in fb['pos1']]
        positives += [words_tgt[w] + shift for w in fb['pos2']]
        P.append(positives)
        negatives = [words_src[w] for w in fb['neg1']]
        negatives += [words_tgt[w] + shift for w in fb['neg2']]
        N.append(negatives)
    return K, P, N
def main():
    """CLI entry point: load embeddings, apply feedback, save results."""
    logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--src-emb', required=True, help='source language embedding directory')
    parser.add_argument('--tgt-emb', required=True, help='target language embedding directory')
    parser.add_argument('--feedback', required=True, help='feedback CSV file')
    parser.add_argument('--n_keywords', default=-1, type=int,
        help='number of keywords (default: use all)')
    parser.add_argument('--out-src', required=True,
        help='output directory for updated source language embeddings')
    parser.add_argument('--out-tgt', required=True,
        help='output directory for updated target language embeddings')
    parser.add_argument('--iter', default=10000, type=int, help='number of iterations')
    parser.add_argument('--reg', default=1, type=float, help='regularizer strength')
    parser.add_argument('--seed', default=31, type=int, help='random seed')
    args = parser.parse_args()
    logging.info(vars(args))
    # Seed both RNGs for reproducible optimization runs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Source and target embeddings are stacked into one matrix; source
    # rows come first (feedback_to_indices relies on this ordering).
    E_src, words_src = load_embeds(args.src_emb)
    E_tgt, words_tgt = load_embeds(args.tgt_emb)
    E = torch.cat((E_src, E_tgt))
    logging.info('Loading user feedback')
    feedback = parse_feedback(args.feedback, args.n_keywords)
    K, P, N = feedback_to_indices(feedback, words_src, words_tgt)
    logging.info('Refining embeddings')
    E_new = update(E, K, P, N, args.reg, args.iter)
    # Split the stacked matrix back into per-language embeddings.
    E_src_new = E_new[:len(words_src)]
    E_tgt_new = E_new[len(words_src):]
    logging.info('Save embeddings')
    save_embeds(args.out_src, E_src_new, words_src)
    save_embeds(args.out_tgt, E_tgt_new, words_tgt)
if __name__ == '__main__':
    main()
| 2.1875 | 2 |
physio2go/exercises/urls.py | hamole/physio2go | 0 | 12771190 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
# URL routing for the exercises app: program list/detail, program entries,
# and per-exercise detail/entry views.
urlpatterns = [
    # App root: list all programs.
    url(
        regex=r'^$',
        view=views.program_list,
        name='program_list'
    ),
    # Program detail, addressed by slug.
    url(
        regex=r'(?P<program_slug>[-\w]+)/$',
        view=views.program_detail,
        name='program_detail'
    ),
    # A single dated entry within a program.
    url(
        regex=r'(?P<program_slug>[-\w]+)/entry/(?P<programentryid>\d+)/$',
        view=views.program_entry,
        name='program_entry'
    ),
    # Exercise detail within a program.
    url(
        regex=r'(?P<program_slug>[-\w]+)/(?P<exercise_slug>[-\w]+)$',
        view=views.exercise_detail,
        name='exercise_detail'
    ),
    # A single exercise entry within a program entry.
    url(
        regex=r'(?P<program_slug>[-\w]+)/entry/(?P<programentryid>\d+)/(?P<exercise_slug>[-\w]+)/(?P<exerciseentryid>\d+)/$',
        view=views.exercise_entry,
        name='exercise_entry'
    ),
]
| 1.953125 | 2 |
app/config.py | kowabunga314/LifeLoot | 0 | 12771191 | <gh_stars>0
from types import SimpleNamespace
# to get a string like this run:
# openssl rand -hex 32
# NOTE(review): "<KEY>" is a scrubbed placeholder — load the real secret
# from the environment rather than committing it to source.
SECRET_KEY = "<KEY>"
# JWT signing algorithm and token lifetime.
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
# Tags for sorting requests in OpenAPI
TAGS = SimpleNamespace(
    USER='Users',
    GAME='Games',
    AUTH='Authentication'
)
usher/tcp_server.py | lukecampbell/usher | 1 | 12771192 | <gh_stars>1-10
#!/usr/bin/env python
import gevent.monkey
gevent.monkey.patch_all()
from gevent.server import StreamServer
from usher.server import UsherServer
from usher.log import log, DEBUG
from struct import pack, unpack
'''
NOP Message
0x00
Acquire Message
'''
class MessageParser:
    """Frames and parses the usher wire protocol over a socket.

    Messages start with a one-byte type tag (NOP/ACQUIRE/RELEASE); all
    multi-byte integers are little-endian. All socket operations are
    bounded by a 10-second gevent timeout.

    NOTE(review): read() issues a single recv(), which may return fewer
    bytes than requested — multi-byte reads could be truncated on a slow
    peer; confirm callers tolerate this or loop until complete.
    """
    NOP_MESSAGE = 0x00
    ACQUIRE_MESSAGE = 0x01
    RELEASE_MESSAGE = 0x02
    def __init__(self, socket):
        self.socket = socket
    def read_uint(self, bytes):
        """Read an unsigned little-endian int of 1, 2, 4 or 8 bytes.

        Returns None for any other width.
        """
        buf = None
        val = None
        if bytes == 1:
            buf = self.read(1)
            val = unpack('B', buf)[0]
        if bytes == 2:
            buf = self.read(2)
            val = unpack('H', buf)[0]
        if bytes == 4:
            buf = self.read(4)
            val = unpack('I', buf)[0]
        if bytes == 8:
            buf = self.read(8)
            val = unpack('Q', buf)[0]
        return val
    def read_int(self, bytes):
        """Signed counterpart of read_uint."""
        buf = None
        val = None
        if bytes == 1:
            buf = self.read(1)
            val = unpack('b', buf)[0]
        if bytes == 2:
            buf = self.read(2)
            val = unpack('h', buf)[0]
        if bytes == 4:
            buf = self.read(4)
            val = unpack('i', buf)[0]
        if bytes == 8:
            buf = self.read(8)
            val = unpack('q', buf)[0]
        return val
    def read(self, bytes):
        """Receive up to ``bytes`` bytes with a 10s timeout."""
        with gevent.timeout.Timeout(10):
            return self.socket.recv(bytes)
    def send(self, message):
        """Send the whole buffer, looping over partial sends, 10s timeout."""
        with gevent.timeout.Timeout(10):
            bytes_sent = 0
            while bytes_sent < len(message):
                bytes_sent += self.socket.send(message[bytes_sent:])
            return bytes_sent
    def parse(self):
        """Read and return the one-byte message type tag."""
        request = self.read_uint(1)
        return request
    def send_acquire(self, namespace, expiration, timeout):
        """Send an ACQUIRE request: tag, expiration, timeout, namespace."""
        message = pack('<BBBH', MessageParser.ACQUIRE_MESSAGE, expiration, timeout, len(namespace))
        message += namespace
        return self.send(message)
    def read_acquire(self):
        """Read an ACQUIRE body: (namespace, expiration, timeout)."""
        expiration = self.read_uint(1)
        timeout = self.read_uint(1)
        strlen = self.read_uint(2)
        namespace = self.read(strlen)
        return (namespace, expiration, timeout)
    def send_acquire_response(self, status, key):
        """Send status byte; on success (>0) append the 16-byte lease key."""
        message = pack('<B', status)
        if status > 0:
            message += key
        return self.send(message)
    def read_acquire_response(self):
        """Read (status, key); key is None unless status > 0."""
        status = self.read_uint(1)
        key = None
        if status > 0:
            key = self.read(16)
        return status, key
    def send_release(self, namespace, key):
        """Send a RELEASE request: tag, namespace, 16-byte lease key."""
        message = pack('<BH', MessageParser.RELEASE_MESSAGE, len(namespace))
        message += namespace
        message += key
        return self.send(message)
    def read_release(self):
        """Read a RELEASE body: (namespace, key)."""
        strlen = self.read_uint(2)
        namespace = self.read(strlen)
        key = self.read(16)
        return namespace, key
    def send_release_response(self, status):
        """Send the one-byte RELEASE status."""
        message = pack('<B', status)
        return self.send(message)
    def read_release_response(self):
        """Read the one-byte RELEASE status."""
        status = self.read_uint(1)
        return status
    def send_nop(self):
        """Send a NOP message (used as a ping/ack)."""
        message = pack('<B', MessageParser.NOP_MESSAGE)
        return self.send(message)
    def read_nop_response(self):
        """Read the one-byte NOP response."""
        status = self.read_uint(1)
        return status
class UsherTCPServer(StreamServer):
    """gevent TCP front-end translating wire messages into UsherServer calls.

    Handles exactly one request per connection: NOP (ping), ACQUIRE
    (lease request) or RELEASE (lease free).
    """
    def __init__(self, *args, **kwargs):
        self.server = UsherServer()
        StreamServer.__init__(self, *args, **kwargs)
    def handle(self, socket, addr):
        """Per-connection handler: parse one message and reply."""
        log.info('%s - Accepted', (addr,))
        mp = MessageParser(socket)
        mtype = mp.parse()
        if mtype == MessageParser.NOP_MESSAGE:
            log.debug('%s - NOP', (addr,))
            mp.send_nop()
            return
        elif mtype == MessageParser.ACQUIRE_MESSAGE:
            log.debug('%s - Acquire', (addr,))
            namespace, expiration, timeout = mp.read_acquire()
            log.debug('%s - (%s/%s) Requested', addr, namespace, expiration)
            status, key = self.server.acquire_lease(namespace, expiration, timeout)
            if log.isEnabledFor(DEBUG):
                # Render the binary lease key as hex for the debug log.
                if key:
                    h = ''.join([hex(ord(i)).replace('0x','') for i in key])
                else:
                    h = 'None'
                log.debug('%s - status: %s key %s', addr, status, h)
            mp.send_acquire_response(status, key)
            return
        elif mtype == MessageParser.RELEASE_MESSAGE:
            log.debug('%s - Release', (addr,))
            namespace, key = mp.read_release()
            log.debug('%s - (%s) Release', (addr,), namespace)
            status = self.server.free_lease(namespace, key)
            log.debug('%s - status: %s', (addr,), status)
            mp.send_release_response(status)
            return
if __name__ == '__main__':
    # Python 2 print statement — this module predates Python 3 support.
    print 'Listening on 9090'
    UsherTCPServer(('0.0.0.0', 9090)).serve_forever()
| 2.3125 | 2 |
hard-gists/6094977/snippet.py | jjhenkel/dockerizeme | 21 | 12771193 | <reponame>jjhenkel/dockerizeme
import smbus
import getopt
import sys
from time import *
from time import gmtime, strftime
# TODO: Factor out all device_write calls to some PCF8574 specific module ...
# will be different with another io expander
# communication from expander to display: high nibble first, then low nibble
# communication via i2c to the PCF 8547: bits are processed from highest to lowest (send P7 bit first)
# General i2c device class so that other devices can be added easily
class i2c_device:
    """Thin wrapper around an SMBus endpoint at a fixed i2c address."""
    def __init__(self, addr, port):
        self.addr = addr
        self.bus = smbus.SMBus(port)
    def write(self, byte):
        """Send a single byte to the device."""
        self.bus.write_byte(self.addr, byte)
    def read(self):
        """Read a single byte from the device."""
        return self.bus.read_byte(self.addr)
    def read_nbytes_data(self, data, n): # For sequential reads > 1 byte
        return self.bus.read_i2c_block_data(self.addr, data, n)
class ioexpander:
    """Placeholder for a future IO-expander abstraction (currently unused)."""

    def __init__(self):
        pass
class lcd:
    """HD44780-compatible LCD driven in 4-bit mode through a PCF8574
    i2c IO expander.

    High data nibble is written first, then the low nibble; each nibble
    is latched by toggling the EN bit (lcd_strobe).  Backlight state is
    OR-ed into every write so it stays on between commands.
    """
    #initializes objects and lcd
    # LCD Commands
    LCD_CLEARDISPLAY = 0x01
    LCD_RETURNHOME = 0x02
    LCD_ENTRYMODESET = 0x04
    LCD_DISPLAYCONTROL = 0x08
    LCD_CURSORSHIFT = 0x10
    LCD_FUNCTIONSET = 0x20
    LCD_SETCGRAMADDR = 0x40
    LCD_SETDDRAMADDR = 0x80
    # Flags for display on/off control
    LCD_DISPLAYON = 0x04
    LCD_DISPLAYOFF = 0x00
    LCD_CURSORON = 0x02
    LCD_CURSOROFF = 0x00
    LCD_BLINKON = 0x01
    LCD_BLINKOFF = 0x00
    # Flags for display entry mode
    LCD_ENTRYRIGHT = 0x00
    LCD_ENTRYLEFT = 0x02
    LCD_ENTRYSHIFTINCREMENT = 0x01
    LCD_ENTRYSHIFTDECREMENT = 0x00
    # Flags for display/cursor shift
    LCD_DISPLAYMOVE = 0x08
    LCD_CURSORMOVE = 0x00
    LCD_MOVERIGHT = 0x04
    LCD_MOVELEFT = 0x00
    # flags for function set
    LCD_8BITMODE = 0x10
    LCD_4BITMODE = 0x00
    LCD_2LINE = 0x08
    LCD_1LINE = 0x00
    LCD_5x10DOTS = 0x04
    LCD_5x8DOTS = 0x00
    # flags for backlight control
    LCD_BACKLIGHT = 0x08
    LCD_NOBACKLIGHT = 0x00
    EN = 0b00000100 # Enable bit
    RW = 0b00000010 # Read/Write bit
    RS = 0b00000001 # Register select bit
    '''
    new pinout:
    ----------
    0x80 P7 -  - D7
    0x40 P6 -  - D6
    0x20 P5 -  - D5
    0x10 P4 -  - D4
    -----------
    0x08 P3 -  - BL   Backlight ???
    0x04 P2 -  - EN   Starts Data read/write
    0x02 P1 -  - RW   low: write, high: read
    0x01 P0 -  - RS   Register Select: 0: Instruction Register (IR) (AC when read), 1: data register (DR)
    '''
    def __init__(self, addr, port, withBacklight=True, withOneTimeInit=False):
        '''
        device writes!
        crosscheck also http://www.monkeyboard.org/tutorials/81-display/70-usb-serial-to-hd44780-lcd
        here a sequence is listed
        '''
        self.displayshift = (self.LCD_CURSORMOVE |
                             self.LCD_MOVERIGHT)
        self.displaymode = (self.LCD_ENTRYLEFT |
                            self.LCD_ENTRYSHIFTDECREMENT)
        self.displaycontrol = (self.LCD_DISPLAYON |
                               self.LCD_CURSOROFF |
                               self.LCD_BLINKOFF)
        if withBacklight:
            self.blFlag=self.LCD_BACKLIGHT
        else:
            self.blFlag=self.LCD_NOBACKLIGHT
        self.lcd_device = i2c_device(addr, port)
        # we can initialize the display only once after it had been powered on
        if(withOneTimeInit):
            self.lcd_device.write(0x20)
            self.lcd_strobe()
            sleep(0.0100) # TODO: Not clear if we have to wait that long
            self.lcd_write(self.LCD_FUNCTIONSET | self.LCD_4BITMODE | self.LCD_2LINE | self.LCD_5x8DOTS) # 0x28
            self.lcd_write(self.LCD_DISPLAYCONTROL | self.displaycontrol) # 0x08 + 0x4 = 0x0C
            self.lcd_write(self.LCD_ENTRYMODESET | self.displaymode) # 0x06
            self.lcd_write(self.LCD_CLEARDISPLAY) # 0x01
            self.lcd_write(self.LCD_CURSORSHIFT | self.displayshift) # 0x14
            self.lcd_write(self.LCD_RETURNHOME)
    # clocks EN to latch command
    def lcd_strobe(self):
        """Pulse the EN line high then low to latch the current nibble."""
        self.lcd_device.write((self.lcd_device.read() | self.EN | self.blFlag)) # | 0b0000 0100 # set "EN" high
        self.lcd_device.write(( (self.lcd_device.read() | self.blFlag) & 0xFB)) # & 0b1111 1011 # set "EN" low
    # write data to lcd in 4 bit mode, 2 nibbles
    # high nibble is sent first
    def lcd_write(self, cmd):
        """Send a command byte (RS=0) as two latched nibbles."""
        #write high nibble first
        self.lcd_device.write( (cmd & 0xF0) | self.blFlag )
        hi= self.lcd_device.read()
        self.lcd_strobe()
        # write low nibble second ...
        self.lcd_device.write( (cmd << 4) | self.blFlag )
        lo= self.lcd_device.read()
        self.lcd_strobe()
        self.lcd_device.write(self.blFlag)
    # write a character to lcd (or character rom) 0x09: backlight | RS=DR
    # works as expected
    def lcd_write_char(self, charvalue):
        """Send a data byte (RS=1) as two latched nibbles."""
        controlFlag = self.blFlag | self.RS
        # write high nibble
        self.lcd_device.write((controlFlag | (charvalue & 0xF0)))
        self.lcd_strobe()
        # write low nibble
        self.lcd_device.write((controlFlag | (charvalue << 4)))
        self.lcd_strobe()
        self.lcd_device.write(self.blFlag)
    # put char function
    def lcd_putc(self, char):
        """Write a single character at the current cursor position."""
        self.lcd_write_char(ord(char))
    def _setDDRAMAdress(self, line, col):
        """Position the cursor; line offsets are for a 20x4 panel."""
        # we write to the Data Display RAM (DDRAM)
        # TODO: Factor line offsets for other display organizations; this is for 20x4 only
        if line == 1:
            self.lcd_write(self.LCD_SETDDRAMADDR | (0x00 + col) )
        if line == 2:
            self.lcd_write(self.LCD_SETDDRAMADDR | (0x40 + col) )
        if line == 3:
            self.lcd_write(self.LCD_SETDDRAMADDR | (0x14 + col) )
        if line == 4:
            self.lcd_write(self.LCD_SETDDRAMADDR | (0x54 + col) )
    # put string function
    def lcd_puts(self, string, line):
        """Write a string starting at column 0 of the given line (1-4)."""
        self._setDDRAMAdress(line, 0)
        for char in string:
            self.lcd_putc(char)
    # clear lcd and set to home
    def lcd_clear(self):
        """Clear the display and return the cursor to home."""
        # self.lcd_write(0x10)
        self.lcd_write(self.LCD_CLEARDISPLAY)
        # self.lcd_write(0x20)
        self.lcd_write(self.LCD_RETURNHOME)
    # add custom characters (0 - 7)
    def lcd_load_custon_chars(self, fontdata):
        """Upload up to 8 custom glyphs into CGRAM.

        NOTE(review): this calls self.lcd_device.bus.write(0x40) — i2c_device
        has no direct ``bus.write`` wrapper taking one argument; confirm this
        method was ever exercised.
        """
        self.lcd_device.bus.write(0x40);
        for char in fontdata:
            for line in char:
                self.lcd_write_char(line)
# Let them know how it works
def usage():
    """Print CLI usage (Python 2 print statement)."""
    print 'Usage: lcdui.py --init --debug --backlightoff'
# Handle the command line arguments
def main():
    """Parse CLI flags, demo the display, then run a simple clock forever."""
    initFlag=False
    debug=False
    backlight=True
    try:
        opts, args = getopt.getopt(sys.argv[1:],"idb",["init","debug","backlightoff"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--init"):
            initFlag = True
        elif opt in ("-d", "--debug"):
            debug = True
        elif opt in ("-b", "--backlightoff"):
            backlight = False
    if initFlag:
        print "Doing initial init ..."
    else:
        print "Skipping init ..."
    # i2c address 0x27, bus 1 (Raspberry Pi default).
    device = lcd(0x27,1,backlight, initFlag)
    device.lcd_puts("01234567890123456789",1)
    device.lcd_puts("012345 Zeile 2 56789",2)
    device.lcd_puts("012345 Zeile 3 56789",3)
    device.lcd_puts(strftime("%Y-%m-%d %H:%M:%S", gmtime()),4)
    sleep(3)
    device.lcd_clear()
    device.lcd_puts("    Simple Clock    ",1)
    # Clock loop: refresh line 3 once a second, never returns.
    while True:
        device.lcd_puts(strftime("%Y-%m-%d %H:%M:%S  ", gmtime()),3)
        sleep(1)
if __name__ == '__main__':
    main()
| 2.59375 | 3 |
open_connect/connectmessages/tests/__init__.py | ofa/connect | 66 | 12771194 | """Custom TestCase and helpers for connectmessages tests."""
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import RequestFactory
from django.utils.timezone import now
from model_mommy import mommy
from open_connect.connect_core.utils.basetests import ConnectTestCase
from open_connect.groups.models import Group
from open_connect.connectmessages.models import Message, Thread, UserThread
# Resolved once at import time; the project may swap in a custom user model.
USER_MODEL = get_user_model()
# Canned body/subject reused by the fixtures below.
MESSAGE_TEXT = (
    'This has been a test. This has been a test.'
    ' This has been a test. This has been a test.'
    ' This has been a test. This has been a test.'
    ' This has been a test. This has been a test.'
)
THREAD_SUBJECT = 'Test message'
class ConnectMessageTestCase(ConnectTestCase):
    """Helper TestCase for connectmessages app.

    setUpClass builds a shared fixture graph (two groups, two group
    threads, one direct thread) once per class; individual tests log in
    as the superuser via setUp.
    """
    # pylint: disable=invalid-name
    @classmethod
    def setUpClass(cls):
        """Setup the TestCase class"""
        super(ConnectMessageTestCase, cls).setUpClass()
        cls.group1 = mommy.make(
            Group, tos_accepted_at=now())
        cls.group2 = mommy.make(Group)
        # superuser belongs to both groups; normal/staff users only group1.
        cls.superuser.add_to_group(cls.group1.pk)
        cls.superuser.add_to_group(cls.group2.pk)
        cls.normal_user.add_to_group(cls.group1.pk)
        cls.staff_user.add_to_group(cls.group1.pk)
        cls.thread1 = mommy.make(
            Thread, group=cls.group1, subject=THREAD_SUBJECT)
        cls.message1 = mommy.make(
            Message, thread=cls.thread1, sender=cls.superuser,
            text=MESSAGE_TEXT, status='approved')
        cls.message2 = mommy.make(
            Message, thread=cls.thread1, sender=cls.normal_user,
            text=MESSAGE_TEXT, status='approved')
        cls.thread2 = mommy.make(
            Thread, group=cls.group2, subject=THREAD_SUBJECT)
        cls.message3 = mommy.make(
            Message, thread=cls.thread2, sender=cls.superuser,
            text=MESSAGE_TEXT, status='approved')
        # A direct (non-group) thread between normal_user and staff_user.
        cls.directthread1 = mommy.make(
            Thread, thread_type='direct', subject=THREAD_SUBJECT)
        cls.directmessage1 = mommy.make(
            Message,
            thread=cls.directthread1,
            sender=cls.user1,
            text=MESSAGE_TEXT,
            status='approved'
        )
        mommy.make(UserThread, user=cls.normal_user, thread=cls.directthread1)
        mommy.make(UserThread, user=cls.staff_user, thread=cls.directthread1)
        # A reusable request with message storage attached, acting as superuser.
        cls.request_factory = RequestFactory()
        cls.request = cls.request_factory.get('/')
        setattr(cls.request, 'session', 'session')
        messages = FallbackStorage(cls.request)
        setattr(cls.request, '_messages', messages)
        cls.request.user = cls.superuser
        cls._group = None
    # pylint: disable=invalid-name
    def setUp(self):
        """Setup the test"""
        # NOTE(review): '<EMAIL>'/'<PASSWORD>' are scrubbed credentials —
        # restore real fixture credentials before running.
        self.client.post(
            reverse('account_login'),
            {'login': '<EMAIL>', 'password': '<PASSWORD>'})
    def message(self, **kwargs):
        """Create a new non-persistent Message."""
        return mommy.prepare(
            Message,
            thread=kwargs.get('thread', self.thread1),
            sender=kwargs.get('user', self.superuser),
            text=kwargs.get('message', MESSAGE_TEXT),
            status=kwargs.get('status', 'approved')
        )
    @property
    def group(self):
        """Cache and return the test group."""
        if not self._group:
            self._group = mommy.make(
                Group, group__name='Test group', published=True)
        return self._group
    # pylint: disable=invalid-name
    def assertSuccess(self, response):
        """Helper method for asserting a response object was successful."""
        self.assertEqual(response.status_code, 200)
object_store_sdk/client.py | easyopsapis/easyops-api-python | 5 | 12771195 | <filename>object_store_sdk/client.py
# -*- coding: utf-8 -*-
import object_store_sdk.api.object_store.object_store_client
class Client(object):
    """Facade bundling the object-store SDK sub-clients."""
    def __init__(self, server_ip="", server_port=0, service_name=""):
        # Single sub-client today; more services can be attached here later.
        self.object_store = object_store_sdk.api.object_store.object_store_client.ObjectStoreClient(server_ip, server_port, service_name)
| 2.203125 | 2 |
tools/libs/utils.py | gurka/OldSchoolTibia | 3 | 12771196 | <reponame>gurka/OldSchoolTibia<gh_stars>1-10
def read_u8(f):
    """Reads an unsigned byte from the file object f.

    Raises EOFError if the stream is exhausted.
    """
    data = f.read(1)
    if not data:
        raise EOFError("EOF")
    return data[0]
def read_u16(f):
    """Reads a two byte little-endian unsigned value from the file object f.

    Raises EOFError when fewer than two bytes are available; the original
    code only checked for an empty read, so a 1-byte tail was silently
    misparsed as a smaller value.
    """
    temp = f.read(2)
    if len(temp) < 2:
        raise EOFError("EOF")
    return int.from_bytes(temp, byteorder='little', signed=False)
def read_u32(f):
    """Reads a four byte little-endian unsigned value from the file object f.

    Raises EOFError when fewer than four bytes are available; the original
    code only checked for an empty read, so a truncated tail was silently
    misparsed as a smaller value.
    """
    temp = f.read(4)
    if len(temp) < 4:
        raise EOFError("EOF")
    return int.from_bytes(temp, byteorder='little', signed=False)
def write_u8(f, v):
    """Writes the value v as an unsigned byte to the file object f."""
    encoded = v.to_bytes(1, byteorder='little', signed=False)
    f.write(encoded)
def write_u16(f, v):
    """Writes the value v as a two byte little-endian unsigned value to f."""
    encoded = v.to_bytes(2, byteorder='little', signed=False)
    f.write(encoded)
def write_u32(f, v):
    """Writes the value v as a four byte little-endian unsigned value to f."""
    encoded = v.to_bytes(4, byteorder='little', signed=False)
    f.write(encoded)
def print_bytes(data):
    """Prints a hex/ascii dump of ``data`` to stdout, 16 bytes per row.

    Non-printable bytes (outside 32..126) render as '.' in the ascii
    column; short final rows are padded so columns stay aligned.
    """
    # Generator that returns l in chunks of size n
    def chunks(l, n):
        for i in range(0, len(l), n):
            yield l[i:i+n]
    for chunk in chunks(data, 16):
        # Lint to be printed
        line = ""
        # Add hex
        str_hex = [ "{:02X}".format(byte) for byte in chunk ]
        line += " ".join(str_hex)
        if len(str_hex) < 16:
            # Pad if less than 16 bytes
            line += "   " * (16 - len(str_hex))
        # Add ascii
        line += " |"
        str_ascii = ["{:c}".format(byte) if 31 < byte < 127 else "." for byte in chunk]
        line += "".join(str_ascii)
        if len(str_ascii) < 16:
            # Pad if less than 16 bytes
            line += " " * (16 - len(str_ascii))
        line += "|"
        # Print line
        print(line)
| 3.375 | 3 |
generator.py | kimvc7/DataSetGenerator | 0 | 12771197 | from random import randint
from matplotlib import pyplot as plt
import matplotlib
import argparse
import numpy as np
import scipy.ndimage
import json
import copy
import operator
# Direction codes 0..7 mapped to unit (dx, dy) steps over the 8-neighborhood,
# clockwise starting from "up" (0, -1); INV_VECTORS is the reverse lookup.
VECTORS={0:(0,-1), 1:(1,-1),2:(1,0),3:(1,1),4:(0,1),5:(-1,1),6:(-1,0),7:(-1,-1)}
INV_VECTORS={(0,-1):0, (1,-1):1,(1,0):2,(1,1):3,(0,1):4,(-1,1):5,(-1,0):6,(-1,-1):7}
NUM_NEIGHBORS=8
# Two steps in the direction table = a 90-degree right turn.
RIGHT_TURN=2
# Elementwise sum of two coordinate tuples.
def sum_vectors(v1, v2):
    return tuple(a + b for a, b in zip(v1, v2))
# Returns the lattice point reached from (x, y) by moving `distance` steps
# in direction (direction + turns) mod 8 (see VECTORS).
# NOTE(review): Python 2 only — the tuple parameter `(x,y)` in the signature
# is a syntax error under Python 3.
def neighbor((x,y),direction,turns,distance):
    return sum_vectors((x,y),tuple(map(lambda x: x*distance,VECTORS[(direction+turns)%NUM_NEIGHBORS])))
class Image():
    def __init__(self,width,height,start_x,start_y,outside,inside,border,thickness,directions,space,min_width,max_width,min_length,max_length,obj_color):
        """Set up a width x height lattice filled with the exterior color
        and initialize the random-path walker state at (start_x, start_y).
        """
        self.WIDTH=width
        self.HEIGHT=height
        # LATTICE[x][y] holds the color at each lattice point.
        self.LATTICE=[[outside]*height for i in xrange(width)]
        self.outside=outside #exterior color
        self.inside=inside #interior color
        self.border=border #border color
        self.current_site=(start_x,start_y)
        self.current_vector=0
        self.old_vector=0
        self.turns=0 #number of turns in the path
        self.end=self.current_site #location where the path ends
        self.thickness=thickness #thickness of the path
        self.directions=directions #allowed directions for turns
        self.space=space #required space before path crosses itself
        self.min_width=min_width #min thickness of path
        self.max_width=max_width #max whickness of path
        self.min_length=min_length #min length of a turn
        self.max_length=max_length #max length of a turn
        self.obj_color=obj_color #color for the object
        self.filling=(100,90,80) #color to fill gaps
        self.cumulative=0
        self.path=[]
#replace old color with new color
def change(self,old_color,new_color):
self.LATTICE=map(lambda row: map(lambda s: new_color if s==old_color else s, row), self.LATTICE)
#True iff (x,y) has valid coordinates
def is_valid(self,(x,y)):
return x>=0 and x<self.WIDTH and y>=0 and y<self.HEIGHT
#True iff moving from (x,y) in direction n makes the path intersect itself.
def crosses(self, (x,y), n):
return self.is_inside(neighbor((x,y),n,1,1)) and self.is_inside(neighbor((x,y),n,-1,1))
#True iff (x,y) has valid coordinates but is not in the interior
def is_not_inside(self,(x,y)):
return self.is_valid((x,y)) and self.LATTICE[x][y]!=self.inside
#True iff (x,y) is in the interior
def is_inside(self,(x,y)):
return self.is_valid((x,y)) and self.LATTICE[x][y]==self.inside
#True iff it is possible to move m times from (x,y) in direction (a,b) without the path crossing itself.
def has_space(self,(x,y), (a,b)):
n=INV_VECTORS[(a,b)]
for i in xrange(1,self.space):
if not self.is_valid((x+i*a,y+i*b)) or self.is_inside((x+i*a,y+i*b)) or self.crosses((x+(i-1)*a,y+(i-1)*b),n):
return False
return True
#True if the neighbor in question is a valid next position
def valid_neighbor(self,neighbor,direction):
(x,y)=self.current_site
return self.is_not_inside(neighbor) and not self.crosses((x,y),direction) and self.has_space(neighbor,VECTORS[direction])
#Returns a list with all possible locations for next move.
def valid_neighbors(self):
neighbors=list(map(lambda i:(neighbor(self.current_site,self.current_vector,i,1),(self.current_vector+i)%8),self.directions))
return filter(lambda (neighbor,i): self.valid_neighbor(neighbor,i),neighbors)
#changes the color of a point if the restriction holds
def set_color(self,(x,y),color,restriction):
if restriction((x,y)):
self.LATTICE[x][y]=color
#Set border of random thickness for the current position
def fill(self,minimum,maximum,step):
if self.cumulative>1:
width=randint(max(self.thickness-1,minimum),min(self.thickness+1,maximum))
if width!=self.thickness:
self.cumulative=0
else:
width=self.thickness
self.cumulative+=1
if step==0:
width=self.thickness
for i in range(width):
for j in {RIGHT_TURN,-RIGHT_TURN}:
(x,y)=neighbor(self.current_site,self.current_vector,j,i)
self.set_color((x,y),self.inside,self.is_valid)
if self.current_vector%RIGHT_TURN==1:
self.set_color((x+np.sign(j),y),self.filling,self.is_not_inside)
self.thickness=width
#fixed the corner when the path changes direction
def fix_corner(self):
difference=self.current_vector-self.old_vector
if abs(difference) in {0,4}:
return
reference=self.old_vector
direction=self.old_vector
if self.current_vector%2==0:
reference=self.current_vector
direction=self.current_vector+4
for j in range(1,self.thickness-1):
for i in range(self.thickness):
for k in {RIGHT_TURN,-RIGHT_TURN}:
(x,y)=neighbor(self.current_site,reference,k,i)
self.set_color(neighbor((x,y),direction,0,j),self.filling,self.is_valid)
#Counts how many neighbors of the site (x,y) at a given distance have a specific color.
def count_nbr_color(self,x,y,color,distance):
count=0
for vector in VECTORS.keys():
(n1,n2)=neighbor((x,y),vector,0,distance)
if self.is_valid((n1,n2)) and self.LATTICE[n1][n2]==color:
count+=1
return count
#Removes isolated points with border color
def remove_spots(self):
for y in xrange(self.HEIGHT):
for x in xrange(self.WIDTH):
if self.LATTICE[x][y]==self.border and self.count_nbr_color(x,y,self.inside,1)==0:
self.LATTICE[x][y]=self.outside
#Redefine border to have thickness 1
def fix_border(self):
for y in xrange(self.HEIGHT):
for x in xrange(self.WIDTH):
if self.LATTICE[x][y]!=self.outside:
if self.count_nbr_color(x,y,self.outside,1)>0:
if self.count_nbr_color(x,y,self.inside,1)>0 or self.count_nbr_color(x,y,self.filling,1)>0:
self.LATTICE[x][y]=self.border
else:
self.LATTICE[x][y]=self.outside
else:
self.LATTICE[x][y]=self.inside
#Create a closed random path with at most n turns
def create_random_path(self,n):
for j in xrange(n):
self.turns+=1
valid_nbrs=self.valid_neighbors()
if len(valid_nbrs)!=0:
nbr=valid_nbrs[randint(0,len(valid_nbrs)-1)] #choose random neighbor
self.current_vector=nbr[1] #update direction
side_length=randint(self.min_length,self.max_length)
for i in xrange(side_length): #move in the same direction for random length
self.LATTICE[self.current_site[0]][self.current_site[1]]=self.inside
self.path.append(self.current_site)
self.fill(self.min_width,self.max_width,i)
vector=VECTORS[nbr[1]]
(x,y)=sum_vectors(self.current_site,vector)
if i==0 and j!=0:
self.fix_corner()
if i!=side_length-1: #check if future position is valid
if self.is_not_inside((x,y)) and not self.crosses(self.current_site,nbr[1]) and self.has_space((x,y),vector):
self.current_site=(x,y)
else:
break
self.old_vector=self.current_vector
self.current_vector=nbr[1]
else:
self.end=self.current_site
break
#display image
def display(self):
image = np.array(self.LATTICE, dtype=np.uint8)
plt.imshow(image, interpolation='none')
plt.show()
#Returns a set with all neighbor sites of (x,y)
def get_neighbors(self,(x,y)):
neighbors=set()
for vector in INV_VECTORS.keys():
nbr=sum_vectors((x,y),vector)
if self.is_valid(nbr):
neighbors.add(nbr)
return neighbors
#assigns a new color to all neighbors of a point.
def paint_neighbors(self,(x,y)):
for vector in INV_VECTORS.keys():
self.set_color(sum_vectors((x,y),vector),self.obj_color, self.is_valid)
#draws an object inside of the closed path
def obj_inside(self):
(x,y)=self.path[randint(2,len(self.path)-3)]
self.LATTICE[x][y]=self.obj_color
self.paint_neighbors((x,y))
#draws an object outside of the closed path
def obj_outside(self):
while True:
y=randint(0,self.HEIGHT-1)
for a in xrange(min(self.WIDTH-self.end[0],self.end[0])):
for x in {a+self.WIDTH/2,-a+self.HEIGHT/2}:
if self.LATTICE[x][y]==self.outside and self.count_nbr_color(x,y,self.outside,1)==8:
self.LATTICE[x][y]=self.obj_color
self.paint_neighbors((x,y))
return
#Create and display a new image
def create_image(width,height, start_x, start_y,background, interior, border, path_width, directions, space, \
                 min_width,max_width, min_length, max_length, object_color, min_turns, max_turns, inside):
    """Build one image, retrying until the path has >= min_turns turns, then show it.

    ``inside`` chooses whether the marker object is drawn inside or outside
    the closed path. The interior color is replaced by the background before
    display, so only border + object remain visible.
    """
    count=0
    while count<min_turns:
        image=Image(width,height,start_x,start_y,background,interior,border,path_width,directions,
                    space,min_width,max_width,min_length,max_length,object_color)
        image.create_random_path(max_turns)
        count=image.turns
    image.fix_border()
    image.remove_spots()
    if inside:
        image.obj_inside()
    else:
        image.obj_outside()
    image.change(interior,background)
    image.display()
def create_data_set(width,height, background, interior, border, path_width, directions, space, \
                    min_width,max_width, min_length, max_length, object_color, min_turns, max_turns,\
                    size, filename ):
    """Generate ``size`` labelled images and dump them to ``filename`` as JSON.

    Each entry maps an index to ``(lattice, label)`` where label is 1 when
    the marker object lies inside the closed path and 0 when outside.
    """
    images={}
    m=0
    for j in xrange(size):
        start_x=randint(width/3,2*width/3)
        start_y=randint(height/3,2*height/3)
        count=0
        while count<min_turns:
            image=Image(width,height,start_x,start_y,background,interior,border,path_width,directions,
                        space,min_width,max_width,min_length,max_length,object_color)
            # BUG FIX: Image has no create_closed_path(); the path builder
            # defined on the class is create_random_path().
            image.create_random_path(max_turns)
            count=image.turns
        image.fix_border()
        image.remove_spots()
        u=randint(0,1) #randomly choose if the object is inside or outside
        r=(0,0)
        if u==0:
            image.obj_inside()
            image.change(interior,background)
            # BUG FIX: Image has no lattice() method; the grid is the
            # LATTICE attribute.
            r=(image.LATTICE,1)
        else:
            image.obj_outside()
            image.change(interior,background)
            r=(image.LATTICE,0)
        images[m]=r
        m+=1
    with open(filename,"w") as f:
        json.dump(images,f)
#Examples:
#data_set(30,30,0,2,1,5,{1,2,3,4,5,6,7,8},3,3,5,5,15,-1,3,3,100000,"dataset.json")
#create_image(200,200,60,60,(0,0,0),(0,250,250),(216,100,123),7,{1,2,3,4},18,5,8,35,40,(1,196,255),20,40,True)
| 3.015625 | 3 |
python/2.OOP/2Inheritance/5.extends_any.py | dunitian/BaseCode | 25 | 12771198 | # 多继承引入
class Father(object):
def eat(self):
print("文雅的吃饭")
class Mom(object):
    """Parent class #2 in the multiple-inheritance demo."""
    def run(self):
        print("小碎步")  # "small quick steps"
class Son(Father, Mom):
    # Inherits eat() from Father and run() from Mom; MRO: Son -> Father -> Mom.
    pass
def main():
    """Exercise both inherited methods on a single Son instance."""
    son = Son()
    son.eat()
    son.run()


if __name__ == '__main__':
    main()
| 3.734375 | 4 |
smc-monitoring/smc_monitoring/monitors/connections.py | kobaan/fp-NGFW-SMC-python | 17 | 12771199 | <filename>smc-monitoring/smc_monitoring/monitors/connections.py
"""
A connection query returns all currently connected sessions on the
given target.
Create a query to obtain all connections for a given engine::
query = ConnectionQuery('sg_vm')
Add a timezone to the query::
query.format.timezone('CST')
Add a filter to only get connections if the source address is 172.18.1.252::
query.add_in_filter(FieldValue(LogField.SRC), [IPValue('172.18.1.252')])
Only connections that match a specific service::
query.add_in_filter(FieldValue(LogField.SERVICE), [ServiceValue('TCP/443', 'UDP/53')])
Execute query and return raw results::
for records in query.fetch_raw():
...
Execute query and return as an :class:`.Connection` element::
for records in query.fetch_as_element():
...
Retrieving live streaming results::
for records in query.fetch_live():
...
.. seealso:: :class:`smc_monitoring.models.filters` for more information on creating filters
"""
from smc_monitoring.models.query import Query
from smc_monitoring.models.constants import LogField
class ConnectionQuery(Query):
    """
    Show all current connections on the specified target.

    :ivar list field_ids: field IDs are the default fields for this entry type
        and are constants found in :class:`smc_monitoring.models.constants.LogField`

    :param str target: name of target engine/cluster
    """

    location = "/monitoring/session/socket"
    field_ids = [
        LogField.TIMESTAMP,
        LogField.NODEID,
        LogField.SRC,
        LogField.SPORT,
        LogField.SRCZONE,
        LogField.DST,
        LogField.DPORT,
        LogField.DSTZONE,
        LogField.SERVICE,
        LogField.IPSAPPID,
        LogField.PROTOCOL,
        LogField.STATE,
    ]

    def __init__(self, target, **kw):
        super(ConnectionQuery, self).__init__("CONNECTIONS", target, **kw)

    def fetch_as_element(self, **kw):
        """
        Fetch the results and return as a Connection element. The original
        query is not modified.

        :return: generator of elements
        :rtype: :class:`.Connection`
        """
        clone = self.copy()
        # Return records keyed by numeric field id so Connection can look
        # fields up via LogField constants.
        clone.format.field_format("id")
        # NOTE(review): custom field selections appear to conflict with the
        # id format, so they are dropped from the clone only — confirm.
        for custom_field in ["field_ids", "field_names"]:
            clone.format.data.pop(custom_field, None)

        for list_of_results in clone.fetch_raw(**kw):
            for entry in list_of_results:
                yield Connection(**entry)
class Connection(object):
    """
    A single state-table entry, produced by
    :meth:`ConnectionQuery.fetch_as_element`. Field values are read lazily
    from the raw record, which is keyed by stringified LogField ids.
    """

    def __init__(self, **data):
        self.cxn = data

    def _field(self, field_id, default=None):
        # Every accessor funnels through here: record keys are str(field id).
        return self.cxn.get(str(field_id), default)

    @property
    def timestamp(self):
        """
        Timestamp string for this connection. Set a timezone on the query
        (e.g. ``query.format.timezone('CST')``) to view it in local time.

        :rtype: str
        """
        return self._field(LogField.TIMESTAMP)

    @property
    def engine(self):
        """
        The engine/cluster for this state table entry.

        :rtype: str
        """
        return self._field(LogField.NODEID)

    @property
    def source_addr(self):
        """
        Source address for this entry.

        :rtype: str
        """
        return self._field(LogField.SRC)

    @property
    def dest_addr(self):
        """
        Destination address for this entry.

        :rtype: str
        """
        return self._field(LogField.DST)

    @property
    def service(self):
        """
        Service for this entry (HTTP/HTTPS, etc).

        :rtype: str
        """
        return self._field(LogField.SERVICE)

    @property
    def protocol(self):
        """
        Protocol for this entry (UDP/TCP/ICMP, etc); 'ANY' when unset.

        :rtype: str
        """
        return self._field(LogField.PROTOCOL, "ANY")

    @property
    def source_port(self):
        """
        Source port for the entry (0 when missing).

        :rtype: int
        """
        return int(self._field(LogField.SPORT, 0))

    @property
    def dest_port(self):
        """
        Destination port for the entry (0 when missing).

        :rtype: int
        """
        return int(self._field(LogField.DPORT, 0))

    @property
    def state(self):
        """
        State of the connection, e.g. 'TCP established'.

        :rtype: str
        """
        return self._field(LogField.STATE)

    def __str__(self):
        details = (self.source_addr, self.dest_addr, self.protocol,
                   self.dest_port, self.state)
        return "{}(src={},dst={},proto={},dst_port={},state={})".format(
            type(self).__name__, *details)

    def __repr__(self):
        return str(self)
| 2.578125 | 3 |
netket/utils/types.py | pesvut/netket | 0 | 12771200 | <filename>netket/utils/types.py
# Copyright 2021 The NetKet Authors - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Sequence, Callable, Union
import jax as _jax
import jaxlib as _jaxlib
import numpy as _np
# TODO: remove when jaxlib 0.1.61 is required and M1 jax/netket runs on m1 natively.
# compatibility with jaxlib<=0.1.61
# we don't really support this old jaxlib, because previous
# versions had bugs and dont work with mpi4jax, but some people
# do use that because of old computer without AVX so...
# eventually delete this.
try:
    # Newer jaxlib exposes DeviceArray on the xla_extension module.
    _DeviceArray = _jaxlib.xla_extension.DeviceArray
except AttributeError:
    # Older jaxlib: fall back to the private interpreter class.
    _DeviceArray = _jax.interpreters.xla._DeviceArray

# Public type aliases used throughout netket annotations.
PRNGKeyT = Any  # a jax PRNG key
SeedT = Union[int, PRNGKeyT]  # integer seed or ready-made PRNG key
Shape = Sequence[int]
DType = Any  # this could be a real type?
Array = Union[_np.ndarray, _DeviceArray, _jax.core.Tracer]
NNInitFunc = Callable[[PRNGKeyT, Shape, DType], Array]
PyTree = Any
Scalar = Any
| 1.75 | 2 |
vang/bitbucket/tests/test_fork_repos.py | bjuvensjo/scripts | 6 | 12771201 | <reponame>bjuvensjo/scripts
from unittest.mock import call, patch
import pytest
from pytest import raises
from vang.bitbucket.fork_repos import fork_repo
from vang.bitbucket.fork_repos import fork_repos
from vang.bitbucket.fork_repos import main
from vang.bitbucket.fork_repos import parse_args
@patch('vang.bitbucket.fork_repos.call')
def test_fork_repo(mock_call):
    # fork_repo should POST a fork payload and return (spec, api response).
    mock_call.return_value = '"response"'

    assert (('project_key', 'repo_slug'), '"response"') == fork_repo(
        ('project_key', 'repo_slug'), 'fork_project_key')
    assert [
        call(
            '/rest/api/1.0/projects/project_key/repos/repo_slug',
            {"slug": "repo_slug", "project": {"key": "fork_project_key"}},
            'POST',
        )
    ] == mock_call.mock_calls
@patch('vang.bitbucket.fork_repos.fork_repo')
def test_fork_repos(mock_fork_repo):
    # Each spec should be forked individually into the target project.
    mock_fork_repo.side_effect = lambda x, y: (x, 'response')

    assert [(['project_key', 'repo_slug'], 'response'),
            (['project_key', 'repo_slug'], 'response')] == fork_repos(
                [['project_key', 'repo_slug'], ['project_key', 'repo_slug']],
                'fork_project_key',
            )
@patch('vang.bitbucket.fork_repos.print')
@patch('vang.bitbucket.fork_repos.fork_repos')
@patch('vang.bitbucket.fork_repos.get_repo_specs')
def test_main(mock_get_repo_specs, mock_fork_repos, mock_print):
    # main() resolves specs, forks them, and prints one line per result.
    mock_get_repo_specs.return_value = [('d1', 'r1'), ('d2', 'r2')]
    mock_fork_repos.return_value = [
        (('d1', 'r1'), 'response1'),
        (('d2', 'r2'), 'response2'),
    ]

    main(
        'fork_project_key',
        ['d1', 'd2'],
        None,
        None,
    )
    assert [call(['d1', 'd2'], None, None)] == mock_get_repo_specs.mock_calls
    assert [
        call([('d1', 'r1'), ('d2', 'r2')], 'fork_project_key'),
    ] == mock_fork_repos.mock_calls
    assert [call('d1/r1: response1'),
            call('d2/r2: response2')] == mock_print.mock_calls
@pytest.mark.parametrize("args", [
    '',
    '1 2',
    '1 -d d -r r',
    '1 -d d -p p',
    '1 -r r -p p',
])
def test_parse_args_raises(args):
    # Missing fork project or mutually exclusive selectors must exit.
    with raises(SystemExit):
        parse_args(args.split(' ') if args else args)
@pytest.mark.parametrize("args, expected", [
    [
        'fork_project_key -d d1 d2',
        {
            'fork_project': 'fork_project_key',
            'dirs': ['d1', 'd2'],
            'repos': None,
            'projects': None
        }
    ],
    [
        'fork_project_key -r key1/repo1 key2/repo2',
        {
            'fork_project': 'fork_project_key',
            'dirs': ['.'],
            'repos': ['key1/repo1', 'key2/repo2'],
            'projects': None
        }
    ],
    [
        'fork_project_key -p key1 key2',
        {
            'fork_project': 'fork_project_key',
            'dirs': ['.'],
            'repos': None,
            'projects': ['key1', 'key2']
        }
    ],
])
def test_parse_args_valid(args, expected):
    # Exactly one of -d / -r / -p may be given; defaults fill the rest.
    assert expected == parse_args(args.split(' ') if args else '').__dict__
| 2.265625 | 2 |
videos/urls.py | rupeshcode/youtube_scraper | 0 | 12771202 | from django.urls import include, path
from videos.views import manage_videos, manage_videos_search
app_name = 'videos'  # URL namespace: reverse as 'videos:manage_videos', etc.

urlpatterns = [
    path('videos', manage_videos, name='manage_videos'),
    # Search endpoint for the video listing.
    path('videos/search', manage_videos_search, name='manage_videos_search')
]
| 1.84375 | 2 |
eodatasets/scripts/__init__.py | omad/eo-datasets | 0 | 12771203 | # coding=utf-8
from __future__ import absolute_import
import logging
def init_logging(debug):
    """Configure log formatting; DEBUG everywhere when *debug*, else INFO for eodatasets."""
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
    target = logging.getLogger() if debug else logging.getLogger('eodatasets')
    target.setLevel(logging.DEBUG if debug else logging.INFO)
| 2.078125 | 2 |
utils/utilities.py | INK-USC/procedural-extraction | 5 | 12771204 | <filename>utils/utilities.py
import re
def convert_int(alist):
    """
    Convert all int-able strings in the list to int, in place.

    Non-convertible elements are left untouched. The (mutated) list is
    also returned for convenience.
    """
    # enumerate instead of range(len(...)) — same in-place semantics,
    # idiomatic iteration.
    for i, value in enumerate(alist):
        try:
            alist[i] = int(value)
        except ValueError:
            pass
    return alist
prune_delimitor = re.compile("([\\s,.!?/\\'\"])")
prune_dict = {
    '',
    'and',
    'or'
}

def prune(sentence):
    """
    Strip empty and meaningless ('and'/'or') tokens from both ends.

    The sentence is split on punctuation/whitespace (delimiters kept at
    odd indices), so stepping by 2 walks word tokens only.
    """
    tokens = prune_delimitor.split(sentence.strip())
    lo, hi = 0, len(tokens)
    while lo < hi and tokens[lo].lower() in prune_dict:
        lo += 2
    while lo < hi and tokens[hi - 1].lower() in prune_dict:
        hi -= 2
    return ''.join(tokens[lo:hi])
def posstr(pos_list):
    """
    Stringify a pos list by concatenating each element's str().
    """
    return ''.join(str(item) for item in pos_list)
if __name__ == '__main__':
    # Ad-hoc smoke test: print pruned samples with a trailing '|' marker so
    # trailing-whitespace behaviour is visible on the console.
    def test_prune():
        print(prune("..and. ma,, wowo/ ,, . ., !")+"|")
        print(prune("ma,, wowo")+"|")
        print(prune("\tandma,, wowo")+"|")
        print(prune("and and ma,, wowo. ")+"|")
        print(prune("... ...")+"|")
    test_prune()
| 3.9375 | 4 |
bubbleimg/obsobj/__init__.py | aileisun/bubblepy | 3 | 12771205 | <reponame>aileisun/bubblepy
# __init__.py
# ALS 2017/05/11
__all__ = ['obsobj', 'plainobj', 'sdss', 'hsc', 'objnaming', 'operator', 'imager']
from . import obsobj
from . import plainobj
from . import sdss
from . import hsc
from . import objnaming
from . import operator
from . import imager
from .obsobj import obsObj
from .operator import Operator
from .imager import Imager
from .sdss import sdssObj
from .hsc import hscObj
| 1.09375 | 1 |
labml/monit.py | Asjidkalam/labml | 2 | 12771206 | from typing import Iterable, Sized, Collection, Callable, Tuple
from typing import Union, Optional, overload
from labml.internal.monitor import monitor_singleton as _internal
def clear():
    """Reset the global monitor's state."""
    _internal().clear()
def func(name, *,
         is_silent: bool = False,
         is_timed: bool = True,
         is_partial: bool = False,
         is_new_line: bool = True,
         is_children_silent: bool = False,
         total_steps: float = 1.0):
    """Decorator that runs the wrapped callable inside a monitor section ``name``."""

    def decorator_func(f: Callable):
        # functools.wraps preserves the wrapped function's __name__ and
        # docstring, which the plain wrapper previously lost.
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            with section(name,
                         is_silent=is_silent,
                         is_timed=is_timed,
                         is_partial=is_partial,
                         is_new_line=is_new_line,
                         is_children_silent=is_children_silent,
                         total_steps=total_steps):
                return f(*args, **kwargs)

        return wrapper

    return decorator_func
def iterate(name, iterable: Union[Iterable, Sized, int],
            total_steps: Optional[int] = None, *,
            is_silent: bool = False,
            is_children_silent: bool = False,
            is_timed: bool = True,
            context=None):
    """Monitor iteration over ``iterable`` under section ``name`` (delegates to the global monitor)."""
    return _internal().iterate(name, iterable, total_steps,
                               is_silent=is_silent,
                               is_children_silent=is_children_silent,
                               is_timed=is_timed,
                               section=context)
def enum(name, iterable: Sized, *,
         is_silent: bool = False,
         is_children_silent: bool = False,
         is_timed: bool = True,
         context=None):
    """Monitored enumerate over ``iterable``. NOTE: shadows the stdlib ``enum`` module name here."""
    return _internal().enum(name, iterable,
                            is_silent=is_silent,
                            is_children_silent=is_children_silent,
                            is_timed=is_timed,
                            section=context)
def section(name, *,
            is_silent: bool = False,
            is_timed: bool = True,
            is_partial: bool = False,
            is_new_line: bool = True,
            is_children_silent: bool = False,
            total_steps: float = 1.0):
    """Create a named monitoring section (used as a context manager, e.g. by :func:`func`)."""
    return _internal().section(name, is_silent=is_silent,
                               is_timed=is_timed,
                               is_partial=is_partial,
                               total_steps=total_steps,
                               is_new_line=is_new_line,
                               is_children_silent=is_children_silent)
def progress(steps: float):
    """Report progress (fraction of ``total_steps``) for the current section."""
    _internal().progress(steps)
def fail():
    """Mark the current section as unsuccessful."""
    _internal().set_successful(False)
@overload
def loop(iterator_: int, *,
         is_track: bool = True,
         is_print_iteration_time: bool = True):
    ...


@overload
def loop(iterator_: range, *,
         is_track: bool = True,
         is_print_iteration_time: bool = True):
    ...


@overload
def loop(iterator_: Collection, *,
         is_track: bool = True,
         is_print_iteration_time: bool = True):
    ...


def loop(iterator_: Union[Collection, range, int], *,
         is_track: bool = True,
         is_print_iteration_time: bool = True):
    """
    This has multiple overloads

    .. function:: loop(iterator_: range, *, is_track=True, is_print_iteration_time=True)
        :noindex:

    .. function:: loop(iterator_: int, *, is_track=True, is_print_iteration_time=True)
        :noindex:
    """
    # isinstance() instead of ``type(...) ==``: the idiomatic type test,
    # and it also accepts int subclasses.
    if isinstance(iterator_, int):
        return _internal().loop(range(iterator_),
                                is_track=is_track,
                                is_print_iteration_time=is_print_iteration_time)
    else:
        return _internal().loop(iterator_,
                                is_track=is_track,
                                is_print_iteration_time=is_print_iteration_time)
def mix(total_iterations, *iterators: Tuple[str, Sized],
        is_monit: bool = True):
    """
    Mix a set of iterators
    """
    # Each positional argument is a (name, sized-iterable) pair.
    return _internal().mix(total_iterations, list(iterators), is_monit=is_monit)
def finish_loop():
    """Tell the monitor the current loop has completed."""
    _internal().finish_loop()
| 2.21875 | 2 |
tests/pipeline_runners/pipeline_runner_utils_test.py | elifesciences/sciencebeam | 272 | 12771207 | <gh_stars>100-1000
from unittest.mock import patch, MagicMock
import pytest
import sciencebeam.pipeline_runners.pipeline_runner_utils as pipeline_runner_utils_module
from sciencebeam.pipeline_runners.pipeline_runner_utils import (
get_remaining_file_list_for_args
)
BASE_TEST_PATH = '/tmp/test/conversion-pipeline'
BASE_DATA_PATH = BASE_TEST_PATH + '/data'
PDF_PATH = '*/*.pdf'
FILE_LIST_PATH = 'file-list.csv'
FILE_COLUMN = 'column1'
REL_PDF_FILE_WITHOUT_EXT_1 = '1/file'
PDF_FILE_1 = BASE_DATA_PATH + '/' + REL_PDF_FILE_WITHOUT_EXT_1 + '.pdf'
OUTPUT_PATH = BASE_TEST_PATH + '/out'
OUTPUT_SUFFIX = '.xml'
@pytest.fixture(name='load_file_list_mock', autouse=True)
def _load_file_list_mock():
    # Auto-use: every test in this module sees load_file_list patched out.
    with patch.object(pipeline_runner_utils_module, 'load_file_list') as mock:
        yield mock
@pytest.fixture(name='find_matching_filenames_with_limit_mock', autouse=True)
def _find_matching_filenames_with_limit_mock():
    # Auto-use: file-glob lookups are always mocked in this module.
    with patch.object(pipeline_runner_utils_module, 'find_matching_filenames_with_limit') as mock:
        yield mock
@pytest.fixture(name='map_file_list_to_file_exists_mock', autouse=True)
def _map_file_list_to_file_exists_mock():
    with patch.object(pipeline_runner_utils_module, 'map_file_list_to_file_exists') as mock:
        # Pretend no output file exists yet, so nothing gets filtered out.
        mock.side_effect = lambda file_list: [False] * len(file_list)
        yield mock
@pytest.fixture(name='args')
def get_default_args():
    # Baseline argparse-namespace stand-in shared by the source-path and
    # file-list fixtures below.
    opt = MagicMock()
    opt.base_data_path = BASE_DATA_PATH
    opt.output_path = OUTPUT_PATH
    opt.output_suffix = OUTPUT_SUFFIX
    opt.limit = None
    return opt
@pytest.fixture(name='file_path_args')
def get_file_path_args(args):
    # Variant selecting sources via a glob pattern rather than a file list.
    opt = args
    opt.source_path = PDF_PATH
    opt.source_file_list = None
    return opt
@pytest.fixture(name='file_list_args')
def get_file_list_args(args):
    # Variant selecting sources via a TSV file list and column.
    opt = args
    opt.source_path = None
    opt.source_file_list = BASE_DATA_PATH + '/file-list.tsv'
    opt.source_file_column = 'url'
    return opt
class TestGetRemainingFileListForArgs:
    def test_should_pass_file_pattern_to_find_files(
            self, file_path_args,
            find_matching_filenames_with_limit_mock: MagicMock):
        # Glob mode: the pattern is joined onto base_data_path and passed
        # through with the configured limit.
        find_matching_filenames_with_limit_mock.return_value = [PDF_FILE_1]
        assert (
            get_remaining_file_list_for_args(file_path_args)
            == find_matching_filenames_with_limit_mock.return_value
        )
        find_matching_filenames_with_limit_mock.assert_called_with(
            BASE_DATA_PATH + '/' + PDF_PATH,
            limit=file_path_args.limit
        )

    def test_should_pass_file_list_and_limit_to_load_file_list(
            self, file_list_args,
            load_file_list_mock: MagicMock):
        # File-list mode: list path, column and limit are forwarded verbatim.
        opt = file_list_args
        opt.limit = 100
        load_file_list_mock.return_value = [PDF_FILE_1]
        assert (
            get_remaining_file_list_for_args(opt)
            == load_file_list_mock.return_value
        )
        load_file_list_mock.assert_called_with(
            opt.source_file_list, column=opt.source_file_column, limit=opt.limit
        )
| 2.09375 | 2 |
test/remoting/webview-localfile/test.py | namaljayathunga/nw.js | 27 | 12771208 | <gh_stars>10-100
import time
import os
import urlparse, urllib
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
def path2url(path):
    # Convert a local filesystem path into a file:// URL.
    # NOTE: Python 2 modules (urlparse/urllib.pathname2url).
    return urlparse.urljoin(
        'file:', urllib.pathname2url(path))
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

testdir = os.path.dirname(os.path.abspath(__file__))
os.chdir(testdir)

chrome_options = Options()
chrome_options.add_argument("nwapp=" + testdir)
# Let chromedriver treat NW.js <webview> windows as switchable targets.
chrome_options.add_experimental_option("windowTypes", ["webview"])

# Render index.html from the template, substituting the local file URL.
htmlfile = os.path.join(testdir, '1.html')
localurl = path2url(htmlfile)

tpl = open('index.tpl', 'r')
content = tpl.read().replace('{localurl}', localurl)
tpl.close()

html = open('index.html', 'w')
html.write(content)
html.close()

driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options, service_log_path="log", service_args=["--verbose"])
driver.implicitly_wait(5)
time.sleep(1)
try:
    # Python 2 print statements throughout (do not modernise without porting).
    print driver.current_url
    wait_switch_window_name(driver, 'webview1')
    result = driver.find_element_by_id('result').get_attribute('innerHTML')
    print result
    # webview1 is expected to load the local file successfully.
    assert('success' in result)
    driver.switch_to_window('main')
    driver.find_element_by_tag_name('button').click() #launch cdt
    time.sleep(3)
    driver.switch_to_window(driver.window_handles[2]) #switch to wv2
    # wv2 is expected to be denied access to the local file.
    assert('is not available' in driver.title)
    driver.switch_to_window(driver.window_handles[1])
    print 'click Elements panel'
    driver.execute_script('return document.querySelector(".inspector-view-tabbed-pane").shadowRoot.getElementById("tab-elements")').click()
    print 'find h1'
    h1 = driver.execute_script('return document.getElementById("elements-content").firstChild.shadowRoot.querySelectorAll(".webkit-html-text-node")[1]').get_attribute('textContent')
    print h1
    assert (h1 == 'success')
finally:
    driver.quit()
| 2.921875 | 3 |
bleet/views.py | ntoll/facebaaak | 0 | 12771209 | import sys
from django.shortcuts import render, get_object_or_404
from bleet.models import Bleet
from users.models import Follow, Profile
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Count
from rest_framework.viewsets import ModelViewSet
from .serializers import BleetSerializer
def is_author(post, request):
    """
    Return True when the request's logged-in user authored ``post``.
    """
    current_user = request.user
    return current_user == post.author
#: Number of bleets to show per page.
PAGINATION_COUNT = 10
class BleetListView(LoginRequiredMixin, ListView):
    """
    Displays a list of bleets
    """

    model = Bleet
    template_name = "bleet/home.html"
    context_object_name = "bleets"
    ordering = ["-date_posted"]
    paginate_by = PAGINATION_COUNT

    def get_queryset(self):
        # Home feed: the user's own bleets plus those of everyone they follow,
        # newest first.
        user = self.request.user
        qs = Follow.objects.filter(user=user)
        follows = [user]
        for obj in qs:
            follows.append(obj.follow_user)
        return Bleet.objects.filter(author__in=follows).order_by("-date_posted")
class UserBleetListView(LoginRequiredMixin, ListView):
    """Profile page: one user's bleets plus follow/unfollow handling."""

    model = Bleet
    template_name = "bleet/user_posts.html"
    context_object_name = "bleets"
    paginate_by = PAGINATION_COUNT

    def visible_user(self):
        # The profile owner, taken from the URL's <username> segment.
        return get_object_or_404(User, username=self.kwargs.get("username"))

    def get_context_data(self, **kwargs):
        visible_user = self.visible_user()
        logged_user = self.request.user

        if logged_user.username == "" or logged_user is None:
            can_follow = False
        else:
            # Offer "follow" only when no Follow row already links the pair.
            can_follow = (
                Follow.objects.filter(
                    user=logged_user, follow_user=visible_user
                ).count()
                == 0
            )
        data = super().get_context_data(**kwargs)
        data["user_profile"] = visible_user
        data["can_follow"] = can_follow
        return data

    def get_queryset(self):
        user = self.visible_user()
        return Bleet.objects.filter(author=user).order_by("-date_posted")

    def post(self, request, *args, **kwargs):
        # Handles the follow/unfollow buttons posted from the profile page.
        if request.user.id is not None:
            follows = Follow.objects.filter(
                user=request.user, follow_user=self.visible_user()
            )

            if "follow" in request.POST:
                new_relation = Follow(
                    user=request.user, follow_user=self.visible_user()
                )
                if follows.count() == 0:
                    new_relation.save()
            elif "unfollow" in request.POST:
                if follows.count() > 0:
                    follows.delete()

        return self.get(self, request, *args, **kwargs)
class BleetDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a bleet; only its author passes the access test."""
    model = Bleet
    template_name = "bleet/bleet_delete.html"
    context_object_name = "bleet"
    success_url = "/"

    def test_func(self):
        # UserPassesTestMixin hook: restrict deletion to the bleet's author.
        return is_author(self.get_object(), self.request)
class BleetCreateView(LoginRequiredMixin, CreateView):
    """Create a new bleet; the author is always the logged-in user."""
    model = Bleet
    fields = ["content"]
    template_name = "bleet/bleet_new.html"
    success_url = "/"

    def form_valid(self, form):
        # Stamp the current user as author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data["tag_line"] = "Add a new bleet"
        return data
class FollowsListView(ListView):
    """Accounts that the given user follows."""
    model = Follow
    template_name = "bleet/follow.html"
    context_object_name = "follows"

    def visible_user(self):
        return get_object_or_404(User, username=self.kwargs.get("username"))

    def get_queryset(self):
        user = self.visible_user()
        return Follow.objects.filter(user=user).order_by("-date")

    def get_context_data(self, *, object_list=None, **kwargs):
        data = super().get_context_data(**kwargs)
        # Shared template: 'follows' selects the "following" presentation.
        data["follow"] = "follows"
        return data
class FollowersListView(ListView):
    """Accounts that follow the given user."""
    model = Follow
    template_name = "bleet/follow.html"
    context_object_name = "follows"

    def visible_user(self):
        return get_object_or_404(User, username=self.kwargs.get("username"))

    def get_queryset(self):
        user = self.visible_user()
        return Follow.objects.filter(follow_user=user).order_by("-date")

    def get_context_data(self, *, object_list=None, **kwargs):
        data = super().get_context_data(**kwargs)
        # Shared template: 'followers' selects the "followers" presentation.
        data["follow"] = "followers"
        return data
class BleetViewSet(ModelViewSet):
    """REST API endpoint exposing CRUD operations for bleets."""
    queryset = Bleet.objects.all()
    serializer_class = BleetSerializer
| 2.140625 | 2 |
2.With WebCam/rps.py | XiyuZhai97/Robotic-Hands-19 | 2 | 12771210 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
import PIL.Image as Image
import time
import cv2
from pyfirmata import Arduino
board = Arduino('/dev/ttyACM0')
little = board.get_pin('d:3:s')
ring = board.get_pin('d:5:s')
middle = board.get_pin('d:6:s')
thumb = board.get_pin('d:9:s')
index = board.get_pin('d:10:s')
def rock():
    # Drive all five finger servos to 170 to show "rock".
    thumb.write(170)
    index.write(170)
    middle.write(170)
    ring.write(170)
    little.write(170)
def paper():
    # Drive all five finger servos to 10 to show "paper".
    thumb.write(10)
    index.write(10)
    middle.write(10)
    ring.write(10)
    little.write(10)
def scissor():
    # Index and middle extended (10), other fingers curled (170): "scissors".
    thumb.write(170)
    index.write(10)
    middle.write(10)
    ring.write(170)
    little.write(170)
def rps(model_dir, classes):
    """Run the webcam rock-paper-scissors loop against the saved TF model.

    The network's output index follows the ``classes`` label order
    (paper, rock, scissors, others); the servo hand answers with the
    winning gesture. The loop stops on any key press, a window click,
    or camera failure.
    """
    # BUG FIX: the original kept ``clicked`` as a local while onMouse
    # declared ``global clicked``, so the callback set a module-level name
    # and clicking the window never terminated the loop. A one-element
    # mutable holder fixes this on both Python 2 and 3 (no ``nonlocal``).
    clicked = [False]

    def onMouse(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONUP:
            clicked[0] = True

    cameraCapture = cv2.VideoCapture(2)
    cameraCapture.set(3, 100)
    cameraCapture.set(4, 100)
    cv2.namedWindow('MyWindow')
    cv2.setMouseCallback('MyWindow', onMouse)
    print('showing camera feed. Click window or press and key to stop.')
    success, frame = cameraCapture.read()
    print(success)

    flag = 0  # last gesture acted on; the hand only moves when it changes
    saver = tf.train.import_meta_graph(model_dir + ".meta")
    with tf.Session() as sess:
        saver.restore(sess, model_dir)
        x = tf.get_default_graph().get_tensor_by_name("images:0")
        keep_prob = tf.get_default_graph().get_tensor_by_name("keep_prob:0")
        y = tf.get_default_graph().get_tensor_by_name("fc2/output:0")
        while success and cv2.waitKey(1) == -1 and not clicked[0]:
            cv2.imshow('MyWindow', frame)
            success, frame = cameraCapture.read()
            img = Image.fromarray(frame)
            # Grayscale and downscale to the 28x28 input the net was trained on.
            img = np.array(img.convert('L').resize((28, 28)), dtype=np.float32)
            img = img.reshape((1, 28 * 28))
            img = img / 255.0
            prediction = sess.run(y, feed_dict={x: img, keep_prob: 1.0})
            index = np.argmax(prediction)
            probability = prediction[0][index]
            # Act only on confident, *changed* predictions (>0.8 softmax).
            if index == 0 and flag != 0 and probability > 0.8:
                print('you paper, me scissor')
                scissor()
                flag = 0
            elif index == 1 and flag != 1 and probability > 0.8:
                print('you rock, me paper')
                paper()
                flag = 1
            elif index == 2 and flag != 2 and probability > 0.8:
                print('you scissor, me rock')
                rock()
                flag = 2
            elif index == 3 and flag != 3 and probability > 0.8:
                print('hey, show either rock, paper or scissor')
                flag = 3
    cv2.destroyWindow('MyWindow')
    cameraCapture.release()
cameraCapture.release()
if __name__ == "__main__":
    # Fix: stripped non-Python dataset residue that was fused onto the final
    # line ("| 2.359375 | 2 |"), which made the script a syntax error.
    # Class order must match the training labels used by the checkpoint.
    classes = ['paper', 'rock', 'scissors', 'others']
    model_dir = "model/model.ckpt"
    # Give the Arduino/servos a moment to initialise before grabbing frames.
    time.sleep(2)
    rps(model_dir, classes)
test_number.py | bilaleluneis/LearningPython | 0 | 12771211 | from unittest import TestCase
from class_odd_and_prime_number import Number
class TestNumber(TestCase):
    """Unit tests for the ``Number`` class from class_odd_and_prime_number."""

    def test_number_init(self):
        # Constructing with a valid int should store it unchanged in ``value``.
        valid_number = Number(5)
        self.assertEqual(valid_number.value, 5)
| 3.515625 | 4 |
tests/lists_tests/test_contains.py | lycantropos/cppbuiltins | 1 | 12771212 | <filename>tests/lists_tests/test_contains.py
from typing import Any
from hypothesis import given
from tests.utils import (AlternativeNativeListsPair,
equivalence)
from . import strategies
@given(strategies.lists_pairs, strategies.objects)
def test_basic(pair: AlternativeNativeListsPair, value: Any) -> None:
    """``in`` must agree between the alternative and the native list."""
    alternative_list, native_list = pair
    in_alternative = value in alternative_list
    in_native = value in native_list
    assert equivalence(in_alternative, in_native)
| 2.296875 | 2 |
tests/test_activities.py | KE-works/pykechain | 5 | 12771213 | <reponame>KE-works/pykechain
import os
import warnings
from datetime import datetime
from unittest import skip, skipIf
import pytest
import pytz
import requests
from pykechain.enums import (
ActivityType,
ActivityStatus,
ActivityClassification,
Category,
activity_root_name_by_classification,
ActivityRootNames,
PaperSize,
PaperOrientation,
NotificationEvent,
Multiplicity,
PropertyType,
)
from pykechain.exceptions import (
NotFoundError,
MultipleFoundError,
IllegalArgumentError,
APIError,
)
from pykechain.models import Activity
from pykechain.models.representations import CustomIconRepresentation
from pykechain.utils import temp_chdir, slugify_ref
from tests.classes import TestBetamax
from tests.utils import TEST_FLAG_IS_WIM2
# strftime/strptime pattern for KE-chain ISO-8601 timestamps (UTC "Z" suffix).
ISOFORMAT = "%Y-%m-%dT%H:%M:%SZ"
# Same, but including fractional seconds.
ISOFORMAT_HIGHPRECISION = "%Y-%m-%dT%H:%M:%S.%fZ"
class TestActivityConstruction(TestBetamax):
    """Integration tests for creating and deleting Activity objects.

    ``setUp`` provisions a parent process activity; ``tearDown`` deletes
    whatever a test stored on ``self.task`` plus the parent process,
    ignoring API errors for already-deleted objects.
    """

    def setUp(self):
        super().setUp()
        self.process = self.project.create_activity(
            name="__Test process", activity_type=ActivityType.PROCESS,
        )
        self.task = None

    def tearDown(self):
        for activity in [self.task, self.process]:
            if activity:
                try:
                    activity.delete()
                except APIError:
                    # Already deleted by the test itself (or cascaded).
                    pass
        super().tearDown()

    def test_create_with_inputs(self):
        name = "__Testing task"
        ref = slugify_ref(name)
        description = "My new task"
        status = ActivityStatus.OPEN
        activity_type = ActivityType.TASK
        classification = ActivityClassification.WORKFLOW

        # setUp
        self.task = self.client.create_activity(
            name=name,
            ref=ref,
            parent=self.process,
            status=status,
            description=description,
            start_date=self.time,
            due_date=self.time,
            activity_type=activity_type,
            classification=classification,
            activity_options=dict(
                representations=[
                    CustomIconRepresentation(value="pennant").as_json(),
                ],
            ),
        )

        # testing
        self.assertIsInstance(self.task, Activity)
        self.assertEqual(name, self.task.name)
        self.assertEqual(ref, self.task.ref)
        self.assertEqual(status, self.task.status)
        self.assertTrue(description, self.task.description)
        self.assertIsInstance(self.task.start_date, datetime)
        self.assertIsInstance(self.task.due_date, datetime)
        self.assertEqual(activity_type, self.task.activity_type)
        self.assertEqual(classification, self.task.classification)

    def test_create_on_scope(self):
        self.task = self.project.create_activity("__Test task")

        self.assertIsInstance(self.task, Activity)
        self.assertEqual(ActivityType.TASK, self.task.activity_type)
        self.assertEqual(ActivityClassification.WORKFLOW, self.task.classification)

    def test_create_below_parent(self):
        self.process.children()  # populate `_cached_children`.
        self.assertIsNotNone(
            self.process._cached_children, "Cached children should be an (empty) list."
        )

        new_task = self.process.create(
            name="__Testing task", activity_type=ActivityType.TASK,
        )

        current_children = self.process.children()

        self.assertTrue(current_children)
        self.assertIn(
            new_task,
            current_children,
            msg="New child task should be among the children.",
        )

    # NOTE: Change this test once we release the FORMS backend to production or a testing setup.
    @skip("This test does not work as the testing backend does not have FORMS in the codebase")
    def test_create_with_classification(self):
        for classification in ActivityClassification.values():
            with self.subTest(msg="Classification: {}".format(classification)):
                # setUp 1
                root_name = activity_root_name_by_classification[classification]
                root = self.project.activity(name=root_name)

                # testing 1
                self.assertEqual(classification, root.classification)
                self.assertEqual(ActivityType.PROCESS, root.activity_type)

                # setUp 2
                task = self.client.create_activity(
                    parent=root,
                    name="{}".format(classification),
                    classification=classification,
                )

                # testing 2
                self.assertEqual(classification, task.classification)

                # tearDown
                task.delete()

    def test_create_with_incorrect_classification(self):
        with self.assertRaises(IllegalArgumentError):
            self.project.create_activity(
                name="Impossible classification", classification="Gummy bears",
            )

    def test_create_with_incorrect_parent(self):
        with self.assertRaises(IllegalArgumentError):
            self.client.create_activity(
                name="Impossible parent", parent="Darth vader",
            )

    def test_create_with_task_as_parent(self):
        task = self.process.create(name="__Test task")

        with self.assertRaises(
            IllegalArgumentError, msg="Tasks cannot be created below other tasks!"
        ):
            task.create("This cannot happen")

    def test_create_with_incorrect_inputs(self):
        with self.assertRaises(IllegalArgumentError):
            self.project.create_activity("__test_task", status="COMPLETE")
        with self.assertRaises(IllegalArgumentError):
            self.project.create_activity("__test_task", start_date=4)
        with self.assertRaises(IllegalArgumentError):
            self.project.create_activity("__test_task", description=1234)
        with self.assertRaises(IllegalArgumentError):
            self.project.create_activity("__test_task", classification="PRODUCT")

    def test_delete(self):
        # setUp
        sub_process_name = "__Test subprocess"
        sub_task_name = "__Test subtask"
        subprocess = self.process.create(
            name=sub_process_name, activity_type=ActivityType.PROCESS
        )
        self.task = subprocess.create(name=sub_task_name)

        subprocess.delete()

        # testing: deletion must cascade and be final.
        with self.assertRaises(APIError, msg="Cant delete the same Activity twice!"):
            subprocess.delete()
        with self.assertRaises(NotFoundError, msg="Deleted Activity cant be found!"):
            self.project.activity(name=sub_process_name)
        with self.assertRaises(
            NotFoundError, msg="Children of deleted Activities cant be found!"
        ):
            self.project.activity(name=sub_task_name)
class TestActivityClone(TestBetamax):
    """Tests for cloning a single activity, synchronously and asynchronously.

    Objects queued in ``self.bucket`` are best-effort deleted in ``tearDown``.
    """

    def setUp(self):
        super().setUp()
        self.process = self.project.create_activity(
            name="__TEST CLONE SUBPROCESS", activity_type=ActivityType.PROCESS
        )
        self.task = self.process.create("__TEST CLONE TASK")
        self.clone = None
        self.bucket = [self.process]

    def tearDown(self):
        for activity in self.bucket:
            if activity:
                try:
                    activity.delete()
                except APIError:
                    pass
        super().tearDown()

    def test(self):
        # Default clone: same parent, new Activity object.
        clone = self.task.clone()

        self.assertIsInstance(clone, Activity)
        self.assertNotEqual(self.task, clone)
        self.assertEqual(self.task.parent_id, clone.parent_id)

    def test_parent_id(self):
        # Cloning under a different parent process changes the parent_id.
        second_process = self.project.create_activity(
            name="__Test process 2", activity_type=ActivityType.PROCESS,
        )
        self.bucket.append(second_process)

        clone = self.task.clone(parent=second_process,)

        self.assertNotEqual(self.task.parent_id, clone.parent_id)

    def test_update(self):
        # update_dict applies attribute changes to the clone in one call.
        new_name = "__TEST TASK RENAMED"
        clone = self.task.clone(update_dict=dict(name=new_name),)

        self.assertEqual(new_name, clone.name)

    def test_update_incorrect(self):
        # Part-related kwargs are invalid without the part-clone flags.
        with self.assertRaises(IllegalArgumentError):
            self.task.clone(part_parent_instance=True)

    def test_async_via_task(self):
        # Asynchronous clone returns nothing from the Activity API.
        response = self.task.clone(asynchronous=True)

        self.assertIsNone(response)

    def test_async_via_client(self):
        # Asynchronous clone via the client returns an empty list.
        response = self.client.clone_activities(
            activities=[self.task], activity_parent=self.process, asynchronous=True
        )

        self.assertIsInstance(response, list)
        self.assertFalse(response)
class TestActivityCloneParts(TestBetamax):
    """Tests for cloning activities together with their configured parts.

    ``setUp`` builds a catalog-side data model (intermediate -> parent ->
    child with two properties), wires it to the task via widgets, and
    prepares a target parent in the product tree to receive the copies.
    """

    def setUp(self):
        super().setUp()
        # Create task to clone
        self.process = self.project.create_activity(
            name="__TEST CLONE SUBPROCESS", activity_type=ActivityType.PROCESS
        )
        self.task = self.process.create(name="__TEST CLONE TASK")

        # Create part model to copy along
        intermediate = self.project.catalog_root_model.add_model(
            name="__TEST CLONE INTERMEDIATE MODEL", multiplicity=Multiplicity.ONE
        )
        source_parent_model = intermediate.add_model(
            name="__TEST CLONE CONFIGURED MODEL - PARENT",
            multiplicity=Multiplicity.ONE_MANY,
        )
        child_model = source_parent_model.add_model(
            name="__TEST CLONE CONFIGURED MODEL - CHILD",
            multiplicity=Multiplicity.ONE_MANY,
        )
        for prop_type in [
            PropertyType.CHAR_VALUE,
            PropertyType.DATE_VALUE,
        ]:
            child_model.add_property(
                name="__TEST " + prop_type, property_type=prop_type
            )

        # Add widget to add configured part models
        wm = self.task.widgets()
        wm.add_filteredgrid_widget(
            parent_instance=source_parent_model.instance(),
            part_model=child_model,
            all_readable=True,
        )
        self.bike_model = self.project.model("Bike")
        self.bike_instance = self.bike_model.instance()
        wm.add_propertygrid_widget(
            part_instance=self.bike_instance, all_readable=True,
        )

        # Create target parents to move to
        self.target_parent_model = self.project.product_root_model.add_model(
            name="__TEST CLONE TARGET PARENT", multiplicity=Multiplicity.ONE
        )
        self.parent_instance = self.target_parent_model.instance()

        # In tearDown, delete tasks first, then configured data models
        self.bucket = [self.task, self.process, intermediate, self.target_parent_model]

    def tearDown(self):
        for obj in self.bucket:
            if obj:
                try:
                    obj.delete()
                except APIError:
                    pass
        super().tearDown()

    def test(self):
        """Copy a data model from the catalog to the product data model tree"""
        clones = self.client.clone_activities(
            activities=[self.task],
            activity_parent=self.process,
            activity_update_dicts={
                self.task.id: {"name": "__TEST CLONE ACTIVITY WITH PARTS"}
            },
            include_part_models=True,
            include_part_instances=True,
            include_children=True,
            part_parent_model=self.target_parent_model,
            part_parent_instance=self.parent_instance,
            asynchronous=False,
        )

        self.assertTrue(clones)

        new_children = list(self.parent_instance.children())
        self.assertTrue(new_children, msg="No parts were copied")

    def test_excluded_models(self):
        """Exclude the bike model from the copy"""
        clones = self.client.clone_activities(
            activities=[self.task],
            activity_parent=self.process,
            activity_update_dicts={
                self.task.id: {"name": "__TEST CLONE ACTIVITY WITH PARTS"}
            },
            include_part_models=True,
            include_part_instances=True,
            include_children=True,
            excluded_parts=[self.bike_model, self.bike_instance],
            part_parent_model=self.target_parent_model,
            part_parent_instance=self.parent_instance,
            asynchronous=False,
        )

        self.assertTrue(clones)

        new_children = list(self.parent_instance.children())
        self.assertTrue(new_children, msg="No parts were copied")
        self.assertNotIn(
            "Bike",
            {c.name for c in new_children},
            msg="Bike should not have been copied over. "
            "Actually it is not copied over, it is moved to the parent_instance",
        )
class TestActivities(TestBetamax):
    """General Activity API tests: retrieval, editing, tree navigation,
    assignees, moving, and associated parts.

    ``setUp`` creates one task (``self.NAME``) below the workflow root;
    ``tearDown`` deletes it best-effort.
    """

    NAME = "__TEST ACTIVITY"

    def setUp(self):
        super().setUp()
        self.workflow_root = self.project.activity(name=ActivityRootNames.WORKFLOW_ROOT)
        self.task = self.project.create_activity(
            name=self.NAME, activity_type=ActivityType.TASK
        )

    def tearDown(self):
        if self.task:
            try:
                self.task.delete()
            except APIError:
                pass
        super().tearDown()

    def test_retrieve_activities(self):
        self.assertTrue(self.project.activities())

    def test_retrieve_single_activity(self):
        self.assertTrue(self.project.activity(self.NAME))

    def test_activity_attributes(self):
        # Contract check: the attributes pykechain promises on an Activity.
        attributes = [
            "_client",
            "_json_data",
            "id",
            "name",
            "created_at",
            "updated_at",
            "ref",
            "description",
            "status",
            "activity_type",
            "_scope_id",
            "start_date",
            "due_date",
        ]
        for attribute in attributes:
            with self.subTest(msg=attribute):
                self.assertTrue(
                    hasattr(self.task, attribute),
                    "Could not find '{}' in the object: '{}'".format(
                        attribute, self.task.__dict__.keys()
                    ),
                )

    def test_retrieve_unknown_activity(self):
        with self.assertRaises(NotFoundError):
            self.project.activity("Hello?!")

    def test_retrieve_too_many_activity(self):
        with self.assertRaises(MultipleFoundError):
            self.project.activity()

    # new in 1.7
    def test_edit_activity_name(self):
        self.task.edit(name="Specify wheel diameter - updated")
        self.task_u = self.project.activity("Specify wheel diameter - updated")
        self.assertEqual(self.task.id, self.task_u.id)
        self.assertEqual(self.task.name, self.task_u.name)
        self.assertEqual(self.task.name, "Specify wheel diameter - updated")

        # Added to improve coverage. Assert whether IllegalArgumentError is raised when 'name' is not a string object.
        with self.assertRaises(IllegalArgumentError):
            self.task.edit(name=True)

    def test_edit_activity_description(self):
        self.task.edit(description="This task has a cool description")
        self.assertEqual(self.task._client.last_response.status_code, requests.codes.ok)

        # Added to improve coverage. Assert whether IllegalArgumentError is raised when 'description' is
        # not a string object.
        with self.assertRaises(IllegalArgumentError):
            self.task.edit(description=42)

    def test_edit_activity_naive_dates(self):
        start_time = datetime(2000, 1, 1, 0, 0, 0)
        due_time = datetime(2019, 12, 31, 0, 0, 0)

        # Naive datetimes trigger a warning in pykechain; suppress it here.
        with warnings.catch_warnings(record=False):
            warnings.simplefilter("ignore")
            self.task.edit(start_date=start_time, due_date=due_time)

        self.assertEqual(self.task._client.last_response.status_code, requests.codes.ok)

        with self.assertRaises(IllegalArgumentError):
            self.task.edit(start_date="All you need is love")
        with self.assertRaises(IllegalArgumentError):
            self.task.edit(due_date="Love is all you need")

    def test_edit_due_date_timezone_aware(self):
        self.task.edit(start_date=self.time, due_date=self.time)
        self.assertEqual(self.task._client.last_response.status_code, requests.codes.ok)

    # 1.10.0
    def test_edit_activity_status(self):
        self.task.edit(status=ActivityStatus.COMPLETED)

        for status in [True, "NO STATUS", 3]:
            with self.subTest(msg=status):
                with self.assertRaises(IllegalArgumentError):
                    self.task.edit(status=status)

    # 1.7.2
    def test_datetime_with_naive_duedate_only_fails(self):
        """reference to #121 - thanks to @joost.schut"""
        naive_duedate = datetime(2017, 6, 5, 5, 0, 0)
        with warnings.catch_warnings(record=False):
            warnings.simplefilter("ignore")
            self.task.edit(due_date=naive_duedate)

    def test_datetime_with_tzinfo_provides_correct_offset(self):
        """reference to #121 - thanks to @joost.schut

        The tzinfo.timezone('Europe/Amsterdam') should provide a 2 hour offset, recording 20 minutes
        """
        # setup
        tz = pytz.timezone("Europe/Amsterdam")
        tzaware_due = tz.localize(datetime(2017, 7, 1))
        tzaware_start = tz.localize(datetime(2017, 6, 30, 0, 0, 0))

        self.task.edit(start_date=tzaware_start)
        self.assertTrue(
            self.task._json_data["start_date"], tzaware_start.isoformat(sep="T")
        )

        self.task.edit(due_date=tzaware_due)
        self.assertTrue(
            self.task._json_data["due_date"], tzaware_due.isoformat(sep="T")
        )

    def test_edit_cascade_down(self):
        # setup
        subprocess = self.project.activity("Subprocess")  # type: Activity
        subtask = self.project.activity("SubTask")  # type: Activity
        testuser = self.client.user(username="testuser")

        subprocess.edit_cascade_down(
            assignees=["testuser"],
            status=ActivityStatus.COMPLETED,
            overwrite=False,
        )

        subprocess.refresh()
        subtask.refresh()

        # testing: assignee and status must have propagated to the child.
        self.assertIn(testuser, subprocess.assignees)
        self.assertIn(testuser, subtask.assignees)
        self.assertEqual(subprocess.status, ActivityStatus.COMPLETED)
        self.assertEqual(subtask.status, ActivityStatus.COMPLETED)

        # tearDown
        subprocess.edit(assignees=[], status=ActivityStatus.OPEN)
        subtask.edit(assignees=[], status=ActivityStatus.OPEN)

    # test added due to #847 - providing no inputs overwrites values
    def test_edit_activity_clearing_values(self):
        # setup
        initial_name = 'Pykechain testing task'
        initial_description = 'Task created to test editing.'
        initial_start_date = datetime(2018, 12, 5, tzinfo=None)
        initial_due_date = datetime(2018, 12, 8, tzinfo=None)
        initial_tags = ['tag_one', 'tag_two']
        initial_assignee = self.client.user(username="testuser")

        self.task.edit(name=initial_name, description=initial_description, tags=initial_tags,
                       start_date=initial_start_date, due_date=initial_due_date, assignees=[initial_assignee.username])

        # Edit without mentioning values, everything should stay the same
        new_name = '<NAME> for task'

        self.task.edit(name=new_name)

        # testing
        self.assertEqual(self.task.name, new_name)
        self.assertEqual(self.task.description, initial_description)
        self.assertEqual(self.task.start_date.strftime("%Y/%m/%d, %H:%M:%S"),
                         initial_start_date.strftime("%Y/%m/%d, %H:%M:%S"))
        self.assertEqual(self.task.due_date.strftime("%Y/%m/%d, %H:%M:%S"),
                         initial_due_date.strftime("%Y/%m/%d, %H:%M:%S"))
        self.assertEqual(self.task.tags, initial_tags)

        # Edit with clearing the values, name and status cannot be cleared
        self.task.edit(name=None, description=None, tags=None, start_date=None, due_date=None, status=None,
                       assignees=None)

        self.task.refresh()
        self.assertEqual(self.task.name, new_name)
        self.assertEqual(self.task.description, str())
        self.assertEqual(self.task.start_date, None)
        self.assertEqual(self.task.due_date, None)
        self.assertEqual(self.task.assignees, list())
        self.assertEqual(self.task.tags, list())

    def test_retrieve_children_of_task_fails_for_task(self):
        with self.assertRaises(NotFoundError, msg="Tasks have no children!"):
            self.task.children()

    def test_child(self):
        child_task = self.workflow_root.child(name=self.NAME)

        self.assertIsInstance(child_task, Activity)
        self.assertEqual(child_task._json_data["parent_id"], self.workflow_root.id)

    def test_child_invalid(self):
        with self.assertRaises(IllegalArgumentError):
            self.workflow_root.child()

        second_process = self.workflow_root.create(name=self.NAME)
        with self.assertRaises(MultipleFoundError):
            self.workflow_root.child(name=self.NAME)
        second_process.delete()

        with self.assertRaises(NotFoundError):
            self.workflow_root.child(name="Just a scratch")

    def test_retrieve_all_children(self):
        all_tasks = self.workflow_root.all_children()

        self.assertIsInstance(all_tasks, list)
        self.assertEqual(
            12, len(all_tasks), msg="Number of tasks has changed, expected 12."
        )

    def test_retrieve_activity_by_id(self):
        task = self.project.activity(name="Subprocess")  # type: Activity

        task_by_id = self.client.activity(pk=task.id)

        self.assertEqual(task.id, task_by_id.id)

    def test_retrieve_siblings_of_a_task_in_a_subprocess(self):
        task = self.project.activity(name="Subprocess")  # type: Activity
        siblings = task.siblings()

        self.assertIn(task.id, [sibling.id for sibling in siblings])
        self.assertTrue(len(siblings) >= 1)

    def test_retrieve_siblings_of_root(self):
        with self.assertRaises(NotFoundError):
            self.workflow_root.siblings()

    # in 1.12
    def test_retrieve_siblings_of_a_task_in_a_subprocess_with_arguments(self):
        task = self.project.activity(name="SubTask")  # type: Activity
        siblings = task.siblings(name__icontains="sub")

        self.assertIn(task.id, [sibling.id for sibling in siblings])
        self.assertEqual(1, len(siblings))

    @skipIf(
        not TEST_FLAG_IS_WIM2,
        reason="This tests is designed for WIM version 2, expected to fail on old WIM",
    )
    def test_activity_without_scope_id_will_fix_itself(self):
        specify_wheel_diam_cripled = self.project.activity(
            name="Specify wheel diameter", fields="id,name,status"
        )
        self.assertFalse(specify_wheel_diam_cripled._json_data.get("scope_id"))

        # now the self-healing will begin
        self.assertEqual(specify_wheel_diam_cripled.scope_id, self.project.id)

    # in 1.13
    def test_create_activity_with_incorrect_activity_class_fails(self):
        with self.assertRaisesRegex(
            IllegalArgumentError, "must be an option from enum"
        ):
            self.project.create_activity(name="New", activity_type="DEFUNCTActivity")

    #
    # @skipIf(not TEST_FLAG_IS_WIM2, reason="This tests is designed for WIM version 2, expected to fail on older WIM")
    # class TestActivitiesWIM2(TestBetamax):
    #
    #     def setUp(self):
    #         super().setUp()
    #         self.root = self.project.activity(ActivityRootNames.WORKFLOW_ROOT)
    #         self.task = self.root.create(name='test task', activity_type=ActivityType.TASK)
    #
    #     def tearDown(self):
    #         if self.task:
    #             self.task.delete()
    #         super().tearDown()

    # 2.0 new activity
    # noinspection PyTypeChecker
    def test_edit_activity_assignee(self):
        specify_wd = self.project.activity("Specify wheel diameter")  # type: Activity
        original_assignee_ids = specify_wd._json_data.get("assignee_ids") or []

        # pykechain_user = self.client.user(username='pykechain')
        test_user = self.client.user(username="testuser")

        specify_wd.edit(assignees_ids=[test_user.id])
        specify_wd.refresh()

        self.assertIsInstance(specify_wd._json_data.get("assignees_ids")[0], int)
        self.assertEqual(
            specify_wd._client.last_response.status_code, requests.codes.ok
        )

        # Added to improve coverage. Assert whether NotFoundError is raised when 'assignee' is not part of the
        # scope members
        with self.assertRaises(NotFoundError):
            specify_wd.edit(assignees_ids=[-100])

        # Added to improve coverage. Assert whether NotFoundError is raised when 'assignee' is not part of the
        # scope members
        with self.assertRaises(IllegalArgumentError):
            specify_wd.edit(assignees_ids="this should have been a list")

        specify_wd.edit(assignees_ids=original_assignee_ids)

    def test_activity_retrieve_parent_of_task(self):
        task = self.project.activity(name="SubTask")
        subprocess = task.parent()  # type Activity
        self.assertEqual(subprocess.activity_type, ActivityType.PROCESS)

    def test_activity_retrieve_parent_of_root(self):
        task = self.project.activity(name=ActivityRootNames.WORKFLOW_ROOT)
        with self.assertRaises(NotFoundError):
            task.parent()

    def test_activity_retrieve_parent_of_a_toplevel_task_returns_workflow_root_id(
        self,
    ):
        task = self.project.activity("Specify wheel diameter")
        parent = task.parent()
        self.assertEqual(self.project._json_data.get("workflow_root_id"), parent.id)

    def test_activity_test_workflow_root_object(self):
        workflow_root = self.project.activity(
            id=self.project._json_data.get("workflow_root_id")
        )

        self.assertTrue(workflow_root.is_root())
        self.assertTrue(workflow_root.is_workflow_root())

    def test_activity_retrieve_children_of_parent(self):
        subprocess = self.project.activity(name="Subprocess")  # type: Activity
        children = subprocess.children()
        self.assertTrue(len(children) >= 1)
        for child in children:
            self.assertEqual(child._json_data.get("parent_id"), subprocess.id)

    def test_activity_retrieve_children_of_subprocess_with_arguments(self):
        subprocess = self.project.activity(name="Subprocess")  # type: Activity
        children = subprocess.children(name__icontains="task")
        self.assertTrue(len(children) >= 1)
        for child in children:
            self.assertEqual(child._json_data.get("parent_id"), subprocess.id)

    def test_count_children(self):
        process = self.project.activity(name="Tasks with Widgets")

        nr = process.count_children()
        self.assertIsInstance(nr, int)
        self.assertEqual(7, nr)

        nr = process.count_children(name__contains="Service")
        self.assertEqual(4, nr)

        with self.assertRaises(IllegalArgumentError):
            self.task.count_children()

    def test_rootlevel_activity_is_rootlevel(self):
        specify_wd = self.project.activity("Specify wheel diameter")

        self.assertTrue(specify_wd.is_rootlevel())

        root_itself = self.project.activity(ActivityRootNames.WORKFLOW_ROOT)

        self.assertFalse(root_itself.is_rootlevel())

    def test_subtask_activity_is_not_rootlevel(self):
        subprocess_subtask = self.project.activity("SubTask")

        self.assertFalse(subprocess_subtask.is_rootlevel())

    def test_activity_is_task(self):
        specify_wd = self.project.activity("Specify wheel diameter")

        self.assertTrue(specify_wd.is_task())
        self.assertFalse(specify_wd.is_subprocess())

    def test_activity_is_subprocess(self):
        subprocess = self.project.activity("Subprocess")

        self.assertTrue(subprocess.is_subprocess())
        self.assertFalse(subprocess.is_task())

    def test_activity_assignees_list(self):
        list_of_assignees_in_data = self.task._json_data.get("assignees_ids")
        assignees_list = self.task.assignees

        self.assertSetEqual(
            set(list_of_assignees_in_data), set([u.id for u in assignees_list])
        )

    def test_activity_assignees_list_no_assignees_gives_empty_list(self):
        activity_name = "Specify wheel diameter"
        activity = self.project.activity(name=activity_name)  # type: Activity

        self.assertListEqual(
            list(),
            activity.assignees,
            "Task has no assignees and should return Empty list",
        )

    def test_activity_move(self):
        # setUp
        activity_to_be_moved = self.task

        new_parent_name = "Subprocess"
        new_parent = self.project.activity(name=new_parent_name)

        activity_to_be_moved.move(parent=new_parent)

        # testing
        self.assertEqual(new_parent, activity_to_be_moved.parent())

    def test_activity_move_under_task_parent(self):
        # setUp
        new_parent_name = "Specify wheel diameter"
        new_parent = self.project.activity(name=new_parent_name)

        # testing: moving below a TASK (not a PROCESS) must be rejected.
        with self.assertRaises(IllegalArgumentError):
            self.task.move(parent=new_parent)

    def test_activity_move_under_part_object(self):
        # setUp
        new_parent_name = "Bike"
        new_parent = self.project.part(name=new_parent_name)

        # testing: a Part is not a valid Activity parent.
        with self.assertRaises(IllegalArgumentError):
            self.task.move(parent=new_parent)

    # tests added in 3.0
    def test_activity_retrieve_with_refs(self):
        # setup
        test_task_ref = slugify_ref(self.task.name)
        test_task_activity = self.project.activity(ref=test_task_ref)

        # testing
        self.assertIsInstance(test_task_activity, Activity)
        self.assertEqual(self.task, test_task_activity)

    def test_activity_associated_parts(self):
        # setUp
        activity_name = "Task - Form + Tables + Service"
        activity = self.project.activity(name=activity_name)
        associated_models, associated_instances = activity.associated_parts()

        # testing
        for model in associated_models:
            self.assertTrue(model.category == Category.MODEL)

            if model.name == "Bike":
                self.assertTrue(model.property(name="Gears").output)
                self.assertFalse(model.property(name="Total height").output)
                self.assertFalse(model.property(name="Picture").output)
                self.assertFalse(model.property(name="Description").output)
                self.assertTrue(model.property(name="Website").output)
                self.assertTrue(model.property(name="Sale?").output)

        for instance in associated_instances:
            self.assertTrue(instance.category == Category.INSTANCE)

        self.assertTrue(len(associated_models) == 3)
        self.assertTrue(len(associated_instances) == 4)

    def test_activity_associated_objects_ids(self):
        # setUp
        activity_name = "Task - Form + Tables + Service"
        activity = self.project.activity(name=activity_name)
        associated_object_ids = activity.associated_object_ids()

        # testing
        self.assertTrue(len(associated_object_ids) == 17)

    def test_activity_parts_of_specific_type(self):
        # setUp
        activity_name = "Task - Form + Tables + Service"
        bike_model = self.project.model(name="Bike")
        activity = self.project.activity(name=activity_name)
        associated_models = activity.parts(category=Category.MODEL)

        # testing
        for model in associated_models:
            self.assertTrue(model.category == Category.MODEL)

            if model == bike_model:
                self.assertTrue(model.property(name="Gears").output)
                self.assertFalse(model.property(name="Total height").output)
                self.assertFalse(model.property(name="Picture").output)
                self.assertFalse(model.property(name="Description").output)
                self.assertTrue(model.property(name="Website").output)
                self.assertTrue(model.property(name="Sale?").output)

        self.assertTrue(len(associated_models) == 3)
class TestActivityDownloadAsPDF(TestBetamax):
    """Tests for exporting an activity as PDF and sharing links/PDFs by mail.

    Sharing tests create real notifications and delete them in-test.
    """

    def test_activity_download_as_pdf(self):
        # setUp
        activity_name = "Task - Form"
        activity = self.project.activity(name=activity_name)

        # testing: both explicit and default filenames should produce a file.
        with temp_chdir() as target_dir:
            pdf_file = activity.download_as_pdf(
                target_dir=target_dir,
                pdf_filename="pdf_file",
            )
            self.assertTrue(os.path.exists(pdf_file))

            pdf_file_called_after_activity = activity.download_as_pdf(
                target_dir=target_dir,
            )
            self.assertTrue(os.path.exists(pdf_file_called_after_activity))

    @pytest.mark.skipif(
        "os.getenv('TRAVIS', False) or os.getenv('GITHUB_ACTIONS', False)",
        reason="Skipping tests when using Travis or Github Actions, as not Auth can be provided",
    )
    def test_activity_download_as_pdf_async(self):
        activity_name = "Task - Form"
        activity = self.project.activity(name=activity_name)

        # testing
        with temp_chdir() as target_dir:
            pdf_file = activity.download_as_pdf(
                target_dir=target_dir,
                pdf_filename="pdf_file",
                include_appendices=True,
                include_qr_code=True,
            )
            self.assertTrue(os.path.exists(pdf_file))

    def test_activity_share_link(self):
        # setUp
        test_user = self.client.user(username="testuser")
        activity_name = "Task - Form"
        message = "EXAMPLE_MESSAGE"
        subject = "EXAMPLE_SUBJECT"
        recipient_users = [test_user]
        activity = self.project.activity(name=activity_name)

        activity.share_link(
            subject=subject,
            message=message,
            recipient_users=recipient_users,
        )

        # testing
        notifications = self.client.notifications(
            subject=subject,
            message=message,
            event=NotificationEvent.SHARE_ACTIVITY_LINK,
        )

        self.assertEqual(self.client.last_response.status_code, requests.codes.ok)
        self.assertTrue(len(notifications), 1)

        # tearDown
        notifications[0].delete()

    def test_activity_share_pdf(self):
        # setUp
        test_user = self.client.user(username="testuser")
        activity_name = "Task - Form"
        message = "EXAMPLE_MESSAGE"
        subject = "EXAMPLE_SUBJECT"
        paper_size = PaperSize.A2
        paper_orientation = PaperOrientation.PORTRAIT
        recipient_users = [test_user]
        activity = self.project.activity(name=activity_name)

        activity.share_pdf(
            subject=subject,
            message=message,
            recipient_users=recipient_users,
            paper_size=paper_size,
            paper_orientation=paper_orientation,
            include_appendices=False,
            include_qr_code=True,
        )

        # testing
        notifications = self.client.notifications(
            subject=subject, message=message, event=NotificationEvent.SHARE_ACTIVITY_PDF
        )

        self.assertEqual(self.client.last_response.status_code, requests.codes.ok)
        self.assertTrue(len(notifications), 1)

        # tearDown
        notifications[0].delete()

    def test_activity_share_pdf_with_from_user(self):
        # setUp
        test_user = self.client.user(username="anotheruser")
        from_user = self.client.user(username="testuser")
        activity_name = "Task - Form"
        message = "EXAMPLE_MESSAGE"
        subject = "EXAMPLE_SUBJECT"
        paper_size = PaperSize.A2
        paper_orientation = PaperOrientation.PORTRAIT
        recipient_users = [test_user]
        activity = self.project.activity(name=activity_name)

        activity.share_pdf(
            from_user=from_user,
            subject=subject,
            message=message,
            recipient_users=recipient_users,
            paper_size=paper_size,
            paper_orientation=paper_orientation,
            include_appendices=False,
            include_qr_code=True,
        )

        # testing
        notifications = self.client.notifications(
            subject=subject, message=message, event=NotificationEvent.SHARE_ACTIVITY_PDF
        )

        self.assertEqual(self.client.last_response.status_code, requests.codes.ok)
        self.assertTrue(len(notifications), 1)

        # tearDown
        notifications[0].delete()

    def test_activity_share_link_with_from_user(self):
        # setUp
        test_user = self.client.user(username="anotheruser")
        from_user = self.client.user(username="testuser")
        activity_name = "Task - Form"
        message = "EXAMPLE_MESSAGE"
        subject = "EXAMPLE_SUBJECT"
        recipient_users = [test_user]
        activity = self.project.activity(name=activity_name)

        activity.share_link(
            from_user=from_user,
            subject=subject,
            message=message,
            recipient_users=recipient_users,
        )

        # testing
        notifications = self.client.notifications(
            subject=subject,
            message=message,
            event=NotificationEvent.SHARE_ACTIVITY_LINK,
        )

        self.assertEqual(self.client.last_response.status_code, requests.codes.ok)
        self.assertTrue(len(notifications), 1)

        # tearDown
        notifications[0].delete()
| 1.953125 | 2 |
setup.py | asmiyusau/ShazamIO | 111 | 12771214 | import setuptools
# Read the project README so PyPI can render it as the long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Distribution metadata for the shazamio package.
setuptools.setup(
    name="shazamio",
    version="0.0.5",
    author="dotX12",
    description="Is a FREE asynchronous library from reverse engineered Shazam API written in Python 3.6+ with asyncio and aiohttp. Includes all the methods that Shazam has, including searching for a song by file.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dotX12/ShazamIO",
    install_requires=['aiohttp', 'pydub', 'numpy', 'aiofiles', 'dataclass-factory',],
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
)
| 1.679688 | 2 |
chrome/installer/mac/universalizer.py | zealoussnow/chromium | 14,668 | 12771215 | #!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import filecmp
import os
import plistlib
import shutil
import stat
import subprocess
import sys
import time
def _stat_or_none(path, root):
"""Calls os.stat or os.lstat to obtain information about a path.
This program traverses parallel directory trees, which may have subtle
differences such as directory entries that are present in fewer than all
trees. It also operates on symbolic links directly, instead of on their
targets.
Args:
path: The path to call os.stat or os.lstat on.
root: True if called on the root of a tree to be merged, False
otherwise. See the discussion below.
Returns:
The return value of os.stat or os.lstat, or possibly None if the path
does not exist.
When root is True, indicating that path is at the root of one of these
trees, this permissiveness is disabled, as all roots are required to be
present. If one is absent, an exception will be raised. When root is True,
os.stat will be used, as this is the one case when it is desirable to
operate on a symbolic link’s target.
When root is False, os.lstat will be used to operate on symbolic links
directly, and a missing path will cause None to be returned.
"""
if root:
return os.stat(path)
try:
return os.lstat(path)
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
def _file_type_for_stat(st):
"""Returns a string indicating the type of directory entry in st.
Args:
st: The return value of os.stat or os.lstat.
Returns:
'symbolic link', 'file', or 'directory'.
"""
if stat.S_ISLNK(st.st_mode):
return 'symbolic_link'
if stat.S_ISREG(st.st_mode):
return 'file'
if stat.S_ISDIR(st.st_mode):
return 'directory'
raise Exception('unknown file type for mode 0o%o' % mode)
def _sole_list_element(l, exception_message):
"""Assures that every element in a list is identical.
Args:
l: The list to consider.
exception_message: A message used to convey failure if every element in
l is not identical.
Returns:
The value of each identical element in the list.
"""
s = set(l)
if len(s) != 1:
raise Exception(exception_message)
return l[0]
def _read_plist(path):
"""Reads a macOS property list, API compatibility adapter."""
with open(path, 'rb') as file:
try:
# New API, available since Python 3.4.
return plistlib.load(file)
except AttributeError:
# Old API, available (but deprecated) until Python 3.9.
return plistlib.readPlist(file)
def _write_plist(value, path):
"""Writes a macOS property list, API compatibility adapter."""
with open(path, 'wb') as file:
try:
# New API, available since Python 3.4.
plistlib.dump(value, file)
except AttributeError:
# Old API, available (but deprecated) until Python 3.9.
plistlib.writePlist(value, file)
class CantMergeException(Exception):
    """Raised when differences exist between input files such that they cannot
    be merged successfully.

    Raised with the pair of paths (input, output) that could not be merged.
    """
    pass
def _merge_info_plists(input_paths, output_path):
    """Merges multiple macOS Info.plist files.

    Args:
        input_paths: A list of paths containing Info.plist files to be merged.
        output_path: The path of the merged Info.plist to create.

    Raises:
        CantMergeException if all input_paths could not successfully be merged
        into output_path.

    A small number of differences are tolerated in the input Info.plists. If a
    key identifying the build environment (OS or toolchain) is different in any
    of the inputs, it will be removed from the output. There are valid reasons
    to produce builds for different architectures using different toolchains or
    SDKs, and there is no way to rationalize these differences into a single
    value.

    If present, the Chrome KSChannelID family of keys are rationalized by using
    “universal” to identify the architecture (compared to, for example,
    “arm64”.)
    """
    input_plists = [_read_plist(x) for x in input_paths]

    # The first input seeds the output; every subsequent input is folded into
    # it, key by key. Only differing keys need any handling.
    output_plist = input_plists[0]
    for index in range(1, len(input_plists)):
        input_plist = input_plists[index]
        for key in set(input_plist.keys()) | set(output_plist.keys()):
            if input_plist.get(key, None) == output_plist.get(key, None):
                continue
            if key in ('BuildMachineOSBuild', 'DTCompiler', 'DTPlatformBuild',
                       'DTPlatformName', 'DTPlatformVersion', 'DTSDKBuild',
                       'DTSDKName', 'DTXcode', 'DTXcodeBuild'):
                # Build-environment keys that differ cannot be rationalized
                # into one value; drop them from both sides so they are absent
                # from the output.
                if key in input_plist:
                    del input_plist[key]
                if key in output_plist:
                    del output_plist[key]
            elif key == 'KSChannelID' or key.startswith('KSChannelID-'):
                # These keys are Chrome-specific, where it’s only present in the
                # outer browser .app’s Info.plist.
                #
                # Ensure that the values match the expected format as a
                # prerequisite to what follows.
                key_tail = key[len('KSChannelID'):]
                input_value = input_plist.get(key, '')
                output_value = output_plist.get(key, '')
                assert input_value.endswith(key_tail)
                assert output_value.endswith(key_tail)

                # Find the longest common trailing sequence of hyphen-separated
                # elements, and use that as the trailing sequence of the new
                # value.
                input_parts = reversed(input_value.split('-'))
                output_parts = output_value.split('-')
                output_parts.reverse()
                new_parts = []
                for input_part, output_part in zip(input_parts, output_parts):
                    if input_part == output_part:
                        new_parts.append(output_part)
                    else:
                        break

                # Prepend “universal” to the entire value if it’s not already
                # there.
                if len(new_parts) == 0 or new_parts[-1] != 'universal':
                    new_parts.append('universal')
                output_plist[key] = '-'.join(reversed(new_parts))
                assert output_plist[key] != ''
            else:
                # Any other differing key makes the inputs unmergeable.
                raise CantMergeException(input_paths[index], output_path)

    _write_plist(output_plist, output_path)
def _universalize(input_paths, output_path, root):
    """Merges multiple trees into a “universal” tree.

    This function provides the recursive internal implementation for
    universalize.

    Args:
        input_paths: The input directory trees to be merged.
        output_path: The merged tree to produce.
        root: True if operating at the root of the input and output trees.
    """
    # Drop inputs that do not contain this entry (non-root entries may be
    # absent from some trees; _stat_or_none returns None for those).
    input_stats = [_stat_or_none(x, root) for x in input_paths]
    for index in range(len(input_paths) - 1, -1, -1):
        if input_stats[index] is None:
            del input_paths[index]
            del input_stats[index]

    input_types = [_file_type_for_stat(x) for x in input_stats]
    file_type = _sole_list_element(
        input_types,
        'varying types %r for input paths %r' % (input_types, input_paths))

    if file_type == 'file':
        # Bug fix: previously the merge (lipo or plist merge) ran
        # unconditionally and was only afterwards overwritten by a plain copy
        # when the inputs were identical. `lipo -create` fails outright on
        # identical thin files (duplicate architectures), so the merge must
        # run only when the inputs actually differ.
        identical = True
        for index in range(1, len(input_paths)):
            if not filecmp.cmp(input_paths[0], input_paths[index]):
                identical = False
                if (os.path.basename(output_path) == 'Info.plist' or
                        os.path.basename(output_path).endswith('-Info.plist')):
                    _merge_info_plists(input_paths, output_path)
                else:
                    command = ['lipo', '-create']
                    command.extend(input_paths)
                    command.extend(['-output', output_path])
                    subprocess.check_call(command)
                break
        if identical:
            shutil.copyfile(input_paths[0], output_path)
    elif file_type == 'directory':
        os.mkdir(output_path)

        # Recurse into the union of all entries present in any input tree.
        entries = set()
        for input_dir in input_paths:
            entries.update(os.listdir(input_dir))
        for entry in entries:
            input_entry_paths = [os.path.join(x, entry) for x in input_paths]
            output_entry_path = os.path.join(output_path, entry)
            _universalize(input_entry_paths, output_entry_path, False)
    elif file_type == 'symbolic_link':
        targets = [os.readlink(x) for x in input_paths]
        target = _sole_list_element(
            targets, 'varying symbolic link targets %r for input paths %r' %
            (targets, input_paths))
        os.symlink(target, output_path)

    input_permissions = [stat.S_IMODE(x.st_mode) for x in input_stats]
    permission = _sole_list_element(
        input_permissions, 'varying permissions %r for input paths %r' %
        (['0o%o' % x for x in input_permissions], input_paths))
    os.lchmod(output_path, permission)

    if file_type != 'file' or identical:
        input_mtimes = [x.st_mtime for x in input_stats]
        if len(set(input_mtimes)) == 1:
            times = (time.time(), input_mtimes[0])
            try:
                # follow_symlinks is only available since Python 3.3.
                os.utime(output_path, times, follow_symlinks=False)
            except TypeError:
                # If it’s a symbolic link and this version of Python isn’t able
                # to set its timestamp, just leave it alone.
                if file_type != 'symbolic_link':
                    os.utime(output_path, times)
        elif file_type == 'directory':
            # When input mtimes differ, still touch directories, in case a
            # directory is a bundle, as a cue to LaunchServices to invalidate
            # anything it may have cached about the bundle as it was being
            # built. (Attached to the mtime check: at the outer level the
            # branch was unreachable, since directories always take the outer
            # `if`.)
            os.utime(output_path, None)
def universalize(input_paths, output_path):
    """Merges multiple trees into a “universal” tree.

    Args:
        input_paths: The input directory trees to be merged.
        output_path: The merged tree to produce.

    input_paths are expected to be parallel directory trees. Each directory
    entry at a given subpath in the input_paths, if present, must be identical
    to all others when present, with these exceptions:
     - Mach-O files that are not identical are merged using lipo.
     - Info.plist files that are not identical are merged by _merge_info_plists.

    If output_path did not exist before this call and the merge fails, the
    partially-built output tree is removed before the exception propagates.
    """
    created_output = not os.path.exists(output_path)
    try:
        return _universalize(input_paths, output_path, True)
    except:
        if created_output and os.path.exists(output_path):
            shutil.rmtree(output_path)
        raise
def main(args):
    """Command-line entry point.

    Args:
        args: Command-line arguments, excluding the program name.

    Raises:
        Exception: when fewer than two input trees are supplied.
    """
    parser = argparse.ArgumentParser(
        description='Merge multiple single-architecture directory trees into a '
        'single universal tree.')
    parser.add_argument(
        'inputs',
        nargs='+',
        metavar='input',
        help='An input directory tree to be merged. At least two inputs must '
        'be provided.')
    parser.add_argument('output', help='The merged directory tree to produce.')
    options = parser.parse_args(args)

    # argparse cannot express "two or more inputs plus one output" directly,
    # so the minimum-input check happens after parsing.
    if len(options.inputs) < 2:
        raise Exception('too few inputs')

    universalize(options.inputs, options.output)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 2.484375 | 2 |
problem0751.py | kmarcini/Project-Euler-Python | 0 | 12771216 | <filename>problem0751.py
###########################
#
# #751 Concatenation Coincidence - Project Euler
# https://projecteuler.net/problem=751
#
# Code by <NAME>
#
###########################
| 1.609375 | 2 |
course_parallel_computer/Section_1_Python_Parallel_Programming_Solutions/Section2/using_with_statement.py | software-foundations/learning-distributed-systems | 0 | 12771217 | <filename>course_parallel_computer/Section_1_Python_Parallel_Programming_Solutions/Section2/using_with_statement.py
import threading
import logging
"""
Shows that a lock can be acquired with a ``with`` statement instead of calling lock.acquire() explicitly.
This was tested for Lock, RLock, Condition and Semaphore objects.
"""
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',)
def threading_with(statement):
    """Acquire and release the synchronization primitive *statement*
    using the context-manager (``with``) protocol, logging the acquisition."""
    with statement:
        logging.debug('%s acquired via with' % statement)
def threading_not_with(statement):
    """Acquire and release the synchronization primitive *statement*
    through explicit acquire()/release() calls, logging the acquisition."""
    statement.acquire()
    try:
        logging.debug('%s acquired directly' % statement)
    finally:
        # release() runs even if the logging call raises.
        statement.release()
if __name__ == '__main__':
    # Build a battery of synchronization primitives to exercise.
    lock = threading.Lock()
    rlock = threading.RLock()
    condition = threading.Condition()
    mutex = threading.Semaphore(1)
    threading_synchronization_list = [lock ,rlock , condition , mutex]
    # For each primitive, run threading_with and threading_not_with in two
    # concurrent threads, joining both before moving to the next primitive.
    for statement in threading_synchronization_list :
        t1 = threading.Thread(target=threading_with, args=(statement,))
        t2 = threading.Thread(target=threading_not_with, args=(statement,))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
| 3.75 | 4 |
TEMpcPlot/tables/pt_tables.py | Prestipino/TEMpcPlot | 0 | 12771218 | <filename>TEMpcPlot/tables/pt_tables.py
#!usr/bin/python
# -*- coding: utf-8 -*-
# pt_table.py
"""Set of functions for atomic weight calculation.

The module contains some data:
N_av           constant  Avogadro number
Faraday        constant  Faraday constant (C/mol): the charge carried by a mole of e-
elements       list      Symbols of the elements
atomic_weight  dict      Atomic weights of the elements (u)
bond_distances dict      Typical bond distances (Angstrom)
"""
def pt_p(atom, property):
    """Atomic properties

    Tables with atomic properties

    Args:
        atom (int or str): atomic number or element symbol
        property (str): property type

    Return:
        floats, string : property of the atom

    Note:
        | Examples:
        | >>>pt_p(34, 'sym')
        | >>>pt_p('Cu', 'At_w')
        |
        | 'At_w'    : atomic weight
        | 'Z'       : atomic number
        | 'cov_r'   : covalent radii
        | 'sym'     : atomic symbol
        | 'e_conf'  : electronic conf.
        | 'ox_st'   : oxydation state
        | 'bon_dis' : typical bond distances
        | 'edges'   : x-ray edges
    """
    ava_prop = ['At_w', 'Z', 'cov_r', 'sym', 'e_conf', 'ox_st', 'edges', 'bon_dis']
    if atom:
        if isinstance(atom, int):
            if property == 'sym':
                return elements[atom]
            else:
                atom = elements[atom]
    assert atom in elements, 'element unknown'
    assert property in ava_prop, 'property unknown'
    if property == 'sym':
        # Bug fix: pt_prop entries carry no 'sym' key, so requesting the
        # symbol of a string-named atom previously raised KeyError; the
        # symbol is the (validated) atom argument itself.
        return atom
    if property == 'e_conf':
        return e_conf(pt_prop[atom]['Z'])
    if property == 'ox_st':
        return set(Common_OxStates[pt_prop[atom]['Z']])
    if property == 'edges':
        return Edge_energy[atom]
    if property == 'bon_dis':
        # Bond-distance keys have the form 'A-B'; keep every pair that
        # involves this atom.
        pairs = [i for i in bond_distances.keys() if atom in i.split('-')]
        return {x: bond_distances[x] for x in pairs}
    return pt_prop[atom][property]
#: Avogadro constant: particles per mole.
N_av=6.02214129e23 # Avogadro number
#: Faraday constant
Faraday= 96485.3329 # Faraday constant (C/mol): charge carried by one mole of electrons
# Klechkowski (Madelung) rule: approximate order in which electron subshells
# fill in atoms; consumed by e_conf() below.
Klechkowski=['1s','2s','2p','3s','3p','4s','3d','4p','5s','4d','5p','6s','4f','5d','6p','7s','5f','6d','7p']
from scipy import interpolate
# Element symbols indexed by atomic number; index 0 is a placeholder so that
# elements[Z] yields the symbol for atomic number Z.
# Bug fix: the final entry (Z = 96, curium) was listed as "Cu" — copper's
# symbol, already present at Z = 29 — instead of "Cm".
elements = ["", "H" , "He", "Li", "Be", "B" , "C" , "N" , "O" , "F" , "Ne", "Na",
 "Mg", "Al", "Si", "P" , "S" , "Cl", "Ar", "K" , "Ca", "Sc", "Ti", "V" , "Cr", "Mn",
 "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge", "As", "Se", "Br", "Kr", "Rb", "Sr", "Y" ,
 "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te", "I" ,
 "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho",
 "Er", "Tm", "Yb", "Lu", "Hf", "Ta", "W" , "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl",
 "Pb", "Bi", "Po", "At", "Rn", "Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm"]
pt_prop ={'H' : {'Z': 1, 'At_w': 1.0079, 'cov_r': 0.31, 'PauliX': 2.2},
'He' : {'Z': 2, 'At_w': 4.0026, 'cov_r': 0.28, 'PauliX': 0.0},
'Li' : {'Z': 3, 'At_w': 6.941, 'cov_r': 1.28, 'PauliX': 0.98},
'Be' : {'Z': 4, 'At_w': 9.0122, 'cov_r': 0.96, 'PauliX': 1.57},
'B' : {'Z': 5, 'At_w': 10.811, 'cov_r': 0.85, 'PauliX': 2.04},
'C' : {'Z': 6, 'At_w': 12.0107, 'cov_r': 0.76, 'PauliX': 2.55},
'N' : {'Z': 7, 'At_w': 14.0067, 'cov_r': 0.71, 'PauliX': 3.04},
'O' : {'Z': 8, 'At_w': 15.9994, 'cov_r': 0.66, 'PauliX': 3.44},
'F' : {'Z': 9, 'At_w': 18.9984, 'cov_r': 0.57, 'PauliX': 3.98},
'Ne' : {'Z': 10, 'At_w': 20.1797, 'cov_r': 0.58, 'PauliX': 0.0},
'Na' : {'Z': 11, 'At_w': 22.9897, 'cov_r': 1.66, 'PauliX': 0.93},
'Mg' : {'Z': 12, 'At_w': 24.305, 'cov_r': 1.41, 'PauliX': 1.31},
'Al' : {'Z': 13, 'At_w': 26.9815, 'cov_r': 1.21, 'PauliX': 1.61},
'Si' : {'Z': 14, 'At_w': 28.0855, 'cov_r': 1.11, 'PauliX': 1.9},
'P' : {'Z': 15, 'At_w': 30.9738, 'cov_r': 1.07, 'PauliX': 2.19},
'S' : {'Z': 16, 'At_w': 32.065, 'cov_r': 1.05, 'PauliX': 2.58},
'Cl' : {'Z': 17, 'At_w': 35.453, 'cov_r': 1.02, 'PauliX': 3.16},
'Ar' : {'Z': 18, 'At_w': 39.948, 'cov_r': 1.06, 'PauliX': 0.0},
'K' : {'Z': 19, 'At_w': 39.0983, 'cov_r': 2.03, 'PauliX': 0.82},
'Ca' : {'Z': 20, 'At_w': 40.078, 'cov_r': 1.76, 'PauliX': 1.0},
'Sc' : {'Z': 21, 'At_w': 44.9559, 'cov_r': 1.7, 'PauliX': 1.36},
'Ti' : {'Z': 22, 'At_w': 47.867, 'cov_r': 1.6, 'PauliX': 1.54},
'V' : {'Z': 23, 'At_w': 50.9415, 'cov_r': 1.53, 'PauliX': 1.63},
'Cr' : {'Z': 24, 'At_w': 51.9961, 'cov_r': 1.39, 'PauliX': 1.66},
'Mn' : {'Z': 25, 'At_w': 54.938, 'cov_r': 1.39, 'PauliX': 1.55},
'Fe' : {'Z': 26, 'At_w': 55.845, 'cov_r': 1.32, 'PauliX': 1.83},
'Co' : {'Z': 27, 'At_w': 58.9332, 'cov_r': 1.26, 'PauliX': 1.88},
'Ni' : {'Z': 28, 'At_w': 58.6934, 'cov_r': 1.24, 'PauliX': 1.91},
'Cu' : {'Z': 29, 'At_w': 63.546, 'cov_r': 1.69, 'PauliX': 1.9},
'Zn' : {'Z': 30, 'At_w': 65.39, 'cov_r': 1.22, 'PauliX': 1.65},
'Ga' : {'Z': 31, 'At_w': 69.723, 'cov_r': 1.22, 'PauliX': 1.81},
'Ge' : {'Z': 32, 'At_w': 72.64, 'cov_r': 1.2, 'PauliX': 2.01},
'As' : {'Z': 33, 'At_w': 74.9216, 'cov_r': 1.19, 'PauliX': 2.18},
'Se' : {'Z': 34, 'At_w': 78.96, 'cov_r': 1.2, 'PauliX': 2.55},
'Br' : {'Z': 35, 'At_w': 79.904, 'cov_r': 1.2, 'PauliX': 2.96},
'Kr' : {'Z': 36, 'At_w': 83.8, 'cov_r': 1.16, 'PauliX': 3.0},
'Rb' : {'Z': 37, 'At_w': 85.4678, 'cov_r': 2.2, 'PauliX': 0.82},
'Sr' : {'Z': 38, 'At_w': 87.62, 'cov_r': 1.95, 'PauliX': 0.95},
'Y' : {'Z': 39, 'At_w': 88.9059, 'cov_r': 1.9, 'PauliX': 1.22},
'Zr' : {'Z': 40, 'At_w': 91.224, 'cov_r': 1.75, 'PauliX': 1.33},
'Nb' : {'Z': 41, 'At_w': 92.9064, 'cov_r': 1.64, 'PauliX': 1.6},
'Mo' : {'Z': 42, 'At_w': 95.94, 'cov_r': 1.54, 'PauliX': 2.16},
'Tc' : {'Z': 43, 'At_w': 98, 'cov_r': 1.47, 'PauliX': 1.9},
'Ru' : {'Z': 44, 'At_w': 101.07, 'cov_r': 1.46, 'PauliX': 2.2},
'Rh' : {'Z': 45, 'At_w': 102.906, 'cov_r': 1.42, 'PauliX': 2.28},
'Pd' : {'Z': 46, 'At_w': 106.42, 'cov_r': 1.39, 'PauliX': 2.2},
'Ag' : {'Z': 47, 'At_w': 107.868, 'cov_r': 1.45, 'PauliX': 1.93},
'Cd' : {'Z': 48, 'At_w': 112.411, 'cov_r': 1.44, 'PauliX': 1.69},
'In' : {'Z': 49, 'At_w': 114.818, 'cov_r': 1.42, 'PauliX': 1.78},
'Sn' : {'Z': 50, 'At_w': 118.71, 'cov_r': 1.39, 'PauliX': 1.96},
'Sb' : {'Z': 51, 'At_w': 121.76, 'cov_r': 1.39, 'PauliX': 2.05},
'Te' : {'Z': 52, 'At_w': 127.6, 'cov_r': 1.38, 'PauliX': 2.1},
'I' : {'Z': 53, 'At_w': 126.904, 'cov_r': 1.39, 'PauliX': 2.66},
'Xe' : {'Z': 54, 'At_w': 131.293, 'cov_r': 1.4, 'PauliX': 2.6},
'Cs' : {'Z': 55, 'At_w': 132.905, 'cov_r': 2.44, 'PauliX': 0.79},
'Ba' : {'Z': 56, 'At_w': 137.327, 'cov_r': 2.15, 'PauliX': 0.89},
'La' : {'Z': 57, 'At_w': 138.905, 'cov_r': 2.07, 'PauliX': 1.1},
'Ce' : {'Z': 58, 'At_w': 140.116, 'cov_r': 2.04, 'PauliX': 1.12},
'Pr' : {'Z': 59, 'At_w': 140.908, 'cov_r': 2.03, 'PauliX': 1.13},
'Nd' : {'Z': 60, 'At_w': 144.24, 'cov_r': 2.01, 'PauliX': 1.14},
'Pm' : {'Z': 61, 'At_w': 145, 'cov_r': 1.99, 'PauliX': 1.13},
'Sm' : {'Z': 62, 'At_w': 150.36, 'cov_r': 1.98, 'PauliX': 1.17},
'Eu' : {'Z': 63, 'At_w': 151.964, 'cov_r': 1.98, 'PauliX': 1.2},
'Gd' : {'Z': 64, 'At_w': 157.25, 'cov_r': 1.96, 'PauliX': 1.2},
'Tb' : {'Z': 65, 'At_w': 158.925, 'cov_r': 1.94, 'PauliX': 1.1},
'Dy' : {'Z': 66, 'At_w': 162.5, 'cov_r': 1.92, 'PauliX': 1.22},
'Ho' : {'Z': 67, 'At_w': 164.93, 'cov_r': 1.92, 'PauliX': 1.23},
'Er' : {'Z': 68, 'At_w': 167.259, 'cov_r': 1.89, 'PauliX': 1.24},
'Tm' : {'Z': 69, 'At_w': 168.934, 'cov_r': 1.9, 'PauliX': 1.25},
'Yb' : {'Z': 70, 'At_w': 173.04, 'cov_r': 1.87, 'PauliX': 1.1},
'Lu' : {'Z': 71, 'At_w': 174.967, 'cov_r': 1.87, 'PauliX': 1.27},
'Hf' : {'Z': 72, 'At_w': 178.49, 'cov_r': 1.75, 'PauliX': 1.3},
'Ta' : {'Z': 73, 'At_w': 180.948, 'cov_r': 1.7, 'PauliX': 1.5},
'W' : {'Z': 74, 'At_w': 183.84, 'cov_r': 1.62, 'PauliX': 2.36},
'Re' : {'Z': 75, 'At_w': 186.207, 'cov_r': 1.51, 'PauliX': 1.9},
'Os' : {'Z': 76, 'At_w': 190.23, 'cov_r': 1.44, 'PauliX': 2.2},
'Ir' : {'Z': 77, 'At_w': 192.217, 'cov_r': 1.41, 'PauliX': 2.2},
'Pt' : {'Z': 78, 'At_w': 195.078, 'cov_r': 1.36, 'PauliX': 2.28},
'Au' : {'Z': 79, 'At_w': 196.966, 'cov_r': 1.36, 'PauliX': 2.54},
'Hg' : {'Z': 80, 'At_w': 200.59, 'cov_r': 1.32, 'PauliX': 2.0},
'Tl' : {'Z': 81, 'At_w': 204.383, 'cov_r': 1.45, 'PauliX': 1.62},
'Pb' : {'Z': 82, 'At_w': 207.2, 'cov_r': 1.46, 'PauliX': 1.87},
'Bi' : {'Z': 83, 'At_w': 208.98, 'cov_r': 1.48, 'PauliX': 2.02},
'Po' : {'Z': 84, 'At_w': 209, 'cov_r': 1.4, 'PauliX': 2.0},
'At' : {'Z': 85, 'At_w': 210, 'cov_r': 1.5, 'PauliX': 2.2},
'Rn' : {'Z': 86, 'At_w': 222, 'cov_r': 1.5, 'PauliX': 2.2},
'Fr' : {'Z': 87, 'At_w': 223, 'cov_r': 2.6, 'PauliX': 0.7},
'Ra' : {'Z': 88, 'At_w': 226, 'cov_r': 2.21, 'PauliX': 0.9},
'Ac' : {'Z': 89, 'At_w': 227, 'cov_r': 2.15, 'PauliX': 1.1},
'Th' : {'Z': 90, 'At_w': 232.038, 'cov_r': 2.06, 'PauliX': 1.3},
'Pa' : {'Z': 91, 'At_w': 231.036, 'cov_r': 2.0, 'PauliX': 1.5},
'U' : {'Z': 92, 'At_w': 238.029, 'cov_r': 1.96, 'PauliX': 1.38},
'Np' : {'Z': 93, 'At_w': 237, 'cov_r': 1.9, 'PauliX': 1.36},
'Pu' : {'Z': 94, 'At_w': 244, 'cov_r': 1.87, 'PauliX': 1.28},
'Am' : {'Z': 95, 'At_w': 243, 'cov_r': 1.8, 'PauliX': 1.13},}
Common_OxStates = [(None),
( -1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -4 , -3 , -2 , -1 , 1 , 2 , 3 , 4 ),
( -3 , 3 , 5 , 0 , 0 , 0 , 0 , 0 ),
( -2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -4 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -3 , 3 , 5 , 0 , 0 , 0 , 0 , 0 ),
( -2 , 2 , 4 , 6 , 0 , 0 , 0 , 0 ),
( -1 , 1 , 3 , 5 , 7 , 0 , 0 , 0 ),
( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 5 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 6 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 4 , 7 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 3 , 6 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 3 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -4 , 2 , 4 , 0 , 0 , 0 , 0 , 0 ),
( -3 , 3 , 5 , 0 , 0 , 0 , 0 , 0 ),
( -2 , 2 , 4 , 6 , 0 , 0 , 0 , 0 ),
( -1 , 1 , 3 , 5 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 5 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 6 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 7 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -4 , 2 , 4 , 0 , 0 , 0 , 0 , 0 ),
( -3 , 3 , 5 , 0 , 0 , 0 , 0 , 0 ),
( -2 , 2 , 4 , 6 , 0 , 0 , 0 , 0 ),
( -1 , 1 , 3 , 5 , 7 , 0 , 0 , 0 ),
( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 3 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 5 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 6 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 2 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 4 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( -2 , 2 , 4 , 0 , 0 , 0 , 0 , 0 ),
( -1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 5 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 6 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 5 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 4 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 5 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 6 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 7 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ),
( 8 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )]
def e_conf(ne):
    """Return the electronic configuration string for an atom with ne
    electrons, using a noble-gas core (e.g. '[He] 2s1 ' for ne=3) and the
    Klechkowski subshell filling order.
    """
    assert ne > 0, 'atomic number should > 0'
    capacity = {'s': 2, 'p': 6, 'd': 10, 'f': 14}
    nobles = [2, 10, 18, 36, 54, 86]

    # Largest noble-gas electron count strictly below ne becomes the core.
    cores = [z for z in nobles if ne - z > 0]
    if not cores:
        # Fewer than 3 electrons: everything fits in 1s.
        return '1s{:d}'.format(ne)
    core = cores[-1]

    # Filling resumes at the s subshell of the period following the core.
    first_subshell = '{:d}s'.format(nobles.index(core) + 2)
    conf = '[{:s}] '.format(pt_p(core, 'sym'))
    remaining = ne - core
    for subshell in Klechkowski[Klechkowski.index(first_subshell):]:
        cap = capacity[subshell[-1]]
        remaining -= cap
        if remaining > 0:
            conf = '{:s}{:s}{:d} '.format(conf, subshell, cap)
        else:
            # Last (possibly partially filled) subshell.
            return '{:s}{:s}{:d} '.format(conf, subshell, cap + remaining)
# Names of the principal x-ray absorption edges.
QN_Transition = ["K","L1","L2","L3"]
# Mapping from edge label to the core level it probes.
# Bug fix: the assignment previously ended with a stray trailing comma, which
# wrapped the list in a one-element tuple.
Transition = [ {"K": "1s"}, {"L1": "2s"}, {"L2": "2p1/2"}, {"L3": "2p3/2"}, {"M1": "3s"}, {"M2": "3p1/2"}, {"M3": "3p3/2 "}, {"M4": "3d3/2"}, {"M5": "3d5/2"}, {"N1": "4s"}, {"N2": "4p1/2"}, {"N3": "4p3/2"}, {"N4": "4d3/2"}, {"N5": "4d5/2"}, {"N6": "4f5/2"}, {"N7": "4f7/2"}, {"O1": "5s"}, {"O2": "5p1/2 "}, {"O3": "5p3/2"}, {"O4": "5d3/2"}, {"O5": "5d5/2"}, {"P1": "6s"}, {"P2": "6p1/2"}, {"P3": "6p3/2"} ]
Edge_energy ={
"H" : [13.6 ],
"He" : [24.6 ],
"Li" : [54.7 ],
"Be" : [111.5 ],
"B" : [188 ],
"C" : [284.2 ],
"N" : [409.9 , 37.3 ],
"O" : [543.1 , 41.6 ],
"F" : [696.7],
"Ne" : [870.2 , 48.5 , 21.7 , 21.6 ],
"Na" : [1070.8 , 63.5 , 30.65 , 30.81 ],
"Mg" : [1303.0 , 88.7 , 49.78 , 49.50 ],
"Al" : [1559.6 , 117.8 , 72.95 , 72.55 ],
"Si" : [1839 , 149.7 , 99.82 , 99.42 ],
"P" : [2145.5 , 189 , 136 , 135 ],
"S" : [2472 , 230.9 , 163.6 , 162.5 ],
"Cl" : [2822.4 , 270 , 202 , 200 ],
"Ar" : [3205.9 , 326.3 , 250.6 , 248.4 , 29.3 , 15.9 , 15.7 ],
"K" : [3608.4 , 378.6 , 297.3 , 294.6 , 34.8 , 18.3 , 18.3 ],
"Ca" : [4038.5 , 438.4 , 349.7 , 346.2 , 44.3 , 25.4 , 25.4 ],
"Sc" : [4492 , 498.0 , 403.6 , 398.7 , 51.1 , 28.3 , 28.3 ],
"Ti" : [4966 , 560.9 , 460.2 , 453.8 , 58.7 , 32.6 , 32.6 ],
"V" : [5465 , 626.7 , 519.8 , 512.1 , 66.3 , 37.2 , 37.2 ],
"Cr" : [5989 , 696.0 , 583.8 , 574.1 , 74.1 , 42.2 , 42.2 ],
"Mn" : [6539 , 769.1 , 649.9 , 638.7 , 82.3 , 47.2 , 47.2 ],
"Fe" : [7112 , 844.6 , 719.9 , 706.8 , 91.3 , 52.7 , 52.7 ],
"Co" : [7709 , 925.1 , 793.2 , 778.1 , 101.0 , 58.9 , 59.9 ],
"Ni" : [8333 , 1008.6 , 870.0 , 852.7 , 110.8 , 68.0 , 66.2 ],
"Cu" : [8979 , 1096.7 , 952.3 , 932.7 , 122.5 , 77.3 , 75.1 ],
"Zn" : [9659 , 1196.2 , 1044.9 , 1021.8 , 139.8 , 91.4 , 88.6 , 10.2 , 10.1 ],
"Ga" : [10367 , 1299.0 , "-" , 1143.2 , 1116.4 , 159.5 , 103.5 , 100.0 , 18.7 , 18.7 ],
"Ge" : [11103 , 1414.6 , "-" , 1248.1 , "-" , 1217.0 , "-" , 180.1 , 124.9 , 120.8 , 29.8 , 29.2 ],
"As" : [11867 , 1527.0 , "-" , 1359.1 , "-" , 1323.6 , "-" , 204.7 , 146.2 , 141.2 , 41.7 , 41.7 ],
"Se" : [12658 , 1652.0 , "-" , 1474.3 , "-" , 1433.9 , "-" , 229.6 , 166.5 , 160.7 , 55.5 , 54.6 ],
"Br" : [13474 , 1782 , 1596 , 1550 , 257 , 189 , 182 , 70 , 69 ],
"Kr" : [14326 , 1921 , 1730.9 , 1678.4 , 292.8 , 222.2 , 214.4 , 95.0 , 93.8 , 27.5 , 14.1 , 14.1 ],
"Rb" : [15200 , 2065 , 1864 , 1804 , 326.7 , 248.7 , 239.1 , 113.0 , 112 , 30.5 , 16.3 , 15.3 ],
"Sr" : [16105 , 2216 , 2007 , 1940 , 358.7 , 280.3 , 270.0 , 136.0 , 134.2 , 38.9 , 21.3 , 20.1 ],
"Y" : [17038 , 2373 , 2156 , 2080 , 392.0 , 310.6 , 298.8 , 157.7 , 155.8 , 43.8 , 24.4 , 23.1 ],
"Zr" : [17998 , 2532 , 2307 , 2223 , 430.3 , 343.5 , 329.8 , 181.1 , 178.8 , 50.6 , 28.5 , 27.1 ],
"Nb" : [18986 , 2698 , 2465 , 2371 , 466.6 , 376.1 , 360.6 , 205.0 , 202.3 , 56.4 , 32.6 , 30.8 ],
"Mo" : [20000 , 2866 , 2625 , 2520 , 506.3 , 411.6 , 394.0 , 231.1 , 227.9 , 63.2 , 37.6 , 35.5 ],
"Tc" : [21044 , 3043 , 2793 , 2677 , 544 , 447.6 , 417.7 , 257.6 , 253.9 , 69.5 , 42.3 , 39.9 ],
"Ru" : [22117 , 3224 , 2967 , 2838 , 586.1 , 483.5 , 461.4 , 284.2 , 280.0 , 75.0 , 46.3 , 43.2 ],
"Rh" : [23220 , 3412 , 3146 , 3004 , 628.1 , 521.3 , 496.5 , 311.9 , 307.2 , 81.4 , 50.5 , 47.3 ],
"Pd" : [24350 , 3604 , 3330 , 3173 , 671.6 , 559.9 , 532.3 , 340.5 , 335.2 , 87.1 , 55.7 , 50.9 ],
"Ag" : [25514 , 3806 , 3524 , 3351 , 719.0 , 603.8 , 573.0 , 374.0 , 368.3 , 97.0 , 63.7 , 58.3 ],
"Cd" : [26711 , 4018 , 3727 , 3538 , 772.0 , 652.6 , 618.4 , 411.9 , 405.2 , 109.8 , 63.9 , 63.9 , 11.7 , 10.7 ],
"In" : [27940 , 4238 , 3938 , 3730 , 827.2 , 703.2 , 665.3 , 451.4 , 443.9 , 122.9 , 73.5 , 73.5 , 17.7 , 16.9 ],
"Sn" : [29200 , 4465 , 4156 , 3929 , 884.7 , 756.5 , 714.6 , 493.2 , 484.9 , 137.1 , 83.6 , 83.6 , 24.9 , 23.9 ],
"Sb" : [30491 , 4698 , 4380 , 4132 , 946 , 812.7 , 766.4 , 537.5 , 528.2 , 153.2 , 95.6 , 95.6 , 33.3 , 32.1 ],
"Te" : [31814 , 4939 , 4612 , 4341 , 1006 , 870.8 , 820.0 , 583.4 , 573.0 , 169.4 , 103.3 , 103.3 , 41.9 , 40.4 ],
"I" : [33169 , 5188 , 4852 , 4557 , 1072 , 931 , 875 , 630.8 , 619.3 , 186 , 123 , 123 , 50.6 , 48.9 ],
"Xe" : [34561 , 5453 , 5107 , 4786 , 1148.7 , 1002.1 , 940.6 , 689.0 , 676.4 , 213.2 , 146.7 , 145.5 , 69.5 , 67.5 , "-" , "-" , 23.3 , 13.4 , 12.1 ],
"Cs" : [35985 , 5714 , 5359 , 5012 , 1211 , 1071 , 1003 , 740.5 , 726.6 , 232.3 , 172.4 , 161.3 , 79.8 , 77.5 , "-" , "-" , 22.7 , 14.2 , 12.1 ],
"Ba" : [37441 , 5989 , 5624 , 5247 , 1293 , 1137 , 1063 , 795.7 , 780.5 , 253.5 , 192 , 178.6 , 92.6 , 89.9 , "-" , "-" , 30.3 , 17.0 , 14.8 ],
"La" : [38925 , 6266 , 5891 , 5483 , 1362 , 1209 , 1128 , 853 , 836 , 274.7 , 205.8 , 196.0 , 105.3 , 102.5 , "-" , "-" , 34.3 , 19.3 , 16.8 ],
"Ce" : [40443 , 6549 , 6164 , 5723 , 1436 , 1274 , 1187 , 902.4 , 883.8 , 291.0 , 223.2 , 206.5 , 109 , "-" , 0.1 , 0.1 , 37.8 , 19.8 , 17.0 ],
"Pr" : [41991 , 6835 , 6440 , 5964 , 1511 , 1337 , 1242 , 948.3 , 928.8 , 304.5 , 236.3 , 217.6 , 115.1 , 115.1 , 2.0 , 2.0 , 37.4 , 22.3 , 22.3 ],
"Nd" : [43569 , 7126 , 6722 , 6208 , 1575 , 1403 , 1297 , 1003.3 , 980.4 , 319.2 , 243.3 , 224.6 , 120.5 , 120.5 , 1.5 , 1.5 , 37.5 , 21.1 , 21.1 ],
"Pm" : [45184 , 7428 , 7013 , 6459 , "-" , 1471 , 1357 , 1052 , 1027 , "-" , 242 , 242 , 120 , 120 ],
"Sm" : [46834 , 7737 , 7312 , 6716 , 1723 , 1541 , 1420 , 1110.9 , 1083.4 , 347.2 , 265.6 , 247.4 , 129 , 129 , 5.2 , 5.2 , 37.4 , 21.3 , 21.3 ],
"Eu" : [48519 , 8052 , 7617 , 6977 , 1800 , 1614 , 1481 , 1158.6 , 1127.5 , 360 , 284 , 257 , 133 , 127.7 , 0 , 0 , 32 , 22 , 22 ],
"Gd" : [50239 , 8376 , 7930 , 7243 , 1881 , 1688 , 1544 , 1221.9 , 1189.6 , 378.6 , 286 , 271 , "-" , 142.6 , 8.6 , 8.6 , 36 , 28 , 21 ],
"Tb" : [51996 , 8708 , 8252 , 7514 , 1968 , 1768 , 1611 , 1276.9 , 1241.1 , 396.0 , 322.4 , 284.1 , 150.5 , 150.5 , 7.7 , 2.4 , 45.6 , 28.7 , 22.6 ],
"Dy" : [53789 , 9046 , 8581 , 7790 , 2047 , 1842 , 1676 , 1333 , 1292.6 , 414.2 , 333.5 , 293.2 , 153.6 , 153.6 , 8.0 , 4.3 , 49.9 , 26.3 , 26.3 ],
"Ho" : [55618 , 9394 , 8918 , 8071 , 2128 , 1923 , 1741 , 1392 , 1351 , 432.4 , 343.5 , 308.2 , 160 , 160 , 8.6 , 5.2 , 49.3 , 30.8 , 24.1 ],
"Er" : [57486 , 9751 , 9264 , 8358 , 2207 , 2006 , 1812 , 1453 , 1409 , 449.8 , 366.2 , 320.2 , 167.6 , 167.6 , "-" , 4.7 , 50.6 , 31.4 , 24.7 ],
"Tm" : [59390 , 10116 , 9617 , 8648 , 2307 , 2090 , 1885 , 1515 , 1468 , 470.9 , 385.9 , 332.6 , 175.5 , 175.5 , "-" , 4.6 , 54.7 , 31.8 , 25.0 ],
"Yb" : [61332 , 10486 , 9978 , 8944 , 2398 , 2173 , 1950 , 1576 , 1528 , 480.5 , 388.7 , 339.7 , 191.2 , 182.4 , 2.5 , 1.3 , 52.0 , 30.3 , 24.1 ],
"Lu" : [63314 , 10870 , 10349 , 9244 , 2491 , 2264 , 2024 , 1639 , 1589 , 506.8 , 412.4 , 359.2 , 206.1 , 196.3 , 8.9 , 7.5 , 57.3 , 33.6 , 26.7 ],
"Hf" : [65351 , 11271 , 10739 , 9561 , 2601 , 2365 , 2108 , 1716 , 1662 , 538 , 438.2 , 380.7 , 220.0 , 211.5 , 15.9 , 14.2 , 64.2 , 38 , 29.9 ],
"Ta" : [67416 , 11682 , 11136 , 9881 , 2708 , 2469 , 2194 , 1793 , 1735 , 563.4 , 463.4 , 400.9 , 237.9 , 226.4 , 23.5 , 21.6 , 69.7 , 42.2 , 32.7 ],
"W" : [69525 , 12100 , 11544 , 10207 , 2820 , 2575 , 2281 , 1872 , 1809 , 594.1 , 490.4 , 423.6 , 255.9 , 243.5 , 33.6 , 31.4 , 75.6 , 45.3 , 36.8 ],
"Re" : [71676 , 12527 , 11959 , 10535 , 2932 , 2682 , 2367 , 1949 , 1883 , 625.4 , 518.7 , 446.8 , 273.9 , 260.5 , 42.9 , 40.5 , 83 , 45.6 , 34.6 ],
"Os" : [73871 , 12968 , 12385 , 10871 , 3049 , 2792 , 2457 , 2031 , 1960 , 658.2 , 549.1 , 470.7 , 293.1 , 278.5 , 53.4 , 50.7 , 84 , 58 , 44.5 ],
"Ir" : [76111 , 13419 , 12824 , 11215 , 3174 , 2909 , 2551 , 2116 , 2040 , 691.1 , 577.8 , 495.8 , 311.9 , 296.3 , 63.8 , 60.8 , 95.2 , 63.0 , 48.0 ],
"Pt" : [78395 , 13880 , 13273 , 11564 , 3296 , 3027 , 2645 , 2202 , 2122 , 725.4 , 609.1 , 519.4 , 331.6 , 314.6 , 74.5 , 71.2 , 101.7 , 65.3 , 51.7 ],
"Au" : [80725 , 14353 , 13734 , 11919 , 3425 , 3148 , 2743 , 2291 , 2206 , 762.1 , 642.7 , 546.3 , 353.2 , 335.1 , 87.6 , 84.0 , 107.2 , 74.2 , 57.2 ],
"Hg" : [83102 , 14839 , 14209 , 12284 , 3562 , 3279 , 2847 , 2385 , 2295 , 802.2 , 680.2 , 576.6 , 378.2 , 358.8 , 104.0 , 99.9 , 127 , 83.1 , 64.5 , 9.6 , 7.8 ],
"Tl" : [85530 , 15347 , 14698 , 12658 , 3704 , 3416 , 2957 , 2485 , 2389 , 846.2 , 720.5 , 609.5 , 405.7 , 385.0 , 122.2 , 117.8 , 136.0 , 94.6 , 73.5 , 14.7 , 12.5 ],
"Pb" : [88005 , 15861 , 15200 , 13035 , 3851 , 3554 , 3066 , 2586 , 2484 , 891.8 , 761.9 , 643.5 , 434.3 , 412.2 , 141.7 , 136.9 , 147 , 106.4 , 83.3 , 20.7 , 18.1 ],
"Bi" : [90524 , 16388 , 15711 , 13419 , 3999 , 3696 , 3177 , 2688 , 2580 , 939 , 805.2 , 678.8 , 464.0 , 440.1 , 162.3 , 157.0 , 159.3 , 119.0 , 92.6 , 26.9 , 23.8 ],
"Po" : [93105 , 16939 , 16244 , 13814 , 4149 , 3854 , 3302 , 2798 , 2683 , 995 , 851 , 705 , 500 , 473 , 184 , 184 , 177 , 132 , 104 , 31 , 31 ],
"At" : [95730 , 17493 , 16785 , 14214 , 4317 , 4008 , 3426 , 2909 , 2787 , 1042 , 886 , 740 , 533 , 507 , 210 , 210 , 195 , 148 , 115 , 40 , 40 ],
"Rn" : [98404 , 18049 , 17337 , 14619 , 4482 , 4159 , 3538 , 3022 , 2892 , 1097 , 929 , 768 , 567 , 541 , 238 , 238 , 214 , 164 , 127 , 48 , 48 , 26 ],
"Fr" : [101137 , 18639 , 17907 , 15031 , 4652 , 4327 , 3663 , 3136 , 3000 , 1153 , 980 , 810 , 603 , 577 , 268 , 268 , 234 , 182 , 140 , 58 , 58 , 34 , 15 ,15 ],
"Ra" : [103922 , 19237 , 18484 , 15444 , 4822 , 4490 , 3792 , 3248 , 3105 , 1208 , 1058 , 879 , 636 , 603 , 299 , 299 , 254 , 200 , 153 , 68 , 68 , 44 , 19 ,19 ],
"Ac" : [106755 , 19840 , 19083 , 15871 , 5002 , 4656 , 3909 , 3370 , 3219 , 1269 , 1080 , 890 , 675 , 639 , 319 , 319 , 272 , 215 , 167 , 80 , 80 ],
"Th" : [109651 , 20472 , 19693 , 16300 , 5182 , 4830 , 4046 , 3491 , 3332 , 1330 , 1168 , 966.4 , 712.1 , 675.2 , 342.4 , 333.1 , 290 , 229 , 182 , 92.5 , 85.4 , 41.4 , 24.5 ,16.6 ],
"Pa" : [112601 , 21105 , 20314 , 16733 , 5367 , 5001 , 4174 , 3611 , 3442 , 1387 , 1224 , 1007 , 743 , 708 , 371 , 360 , 310 , 232 , 232 , 94 , 94 ],
"U" : [115606 , 21757 , 20948 , 17166 , 5548 , 5182 , 4303 , 3728 , 3552 , 1439 , 1271 , 1043 , 778.3 , 736.2 , 388.2 , 377.4 , 321 , 257 , 192 , 102.8 , 94.2 , 43.9 , 26.8 ,16.8 ]}
# Tabulation of core-hole level width versus atomic number Z, from <NAME> and
# <NAME>, Atomic Data and Nuclear Data Tables, Vol 14, Number 2, 1974.
# Each edge label maps to [Z knots, width knots (eV)]; getGamach() fits a
# cubic spline through these points and evaluates it at the requested Z.
gamach ={
    'K'  : [ [ 0.99, 10.0, 20.0, 40.0, 50.0, 60.0, 80.0, 95.1], [0.02, 0.28, 0.75, 4.8, 10.5, 21.0, 60.0, 105.0] ],
    'L1' : [ [ 0.99, 18.0, 22.0, 35.0, 50.0, 52.0, 75.0, 95.1], [0.07, 3.9, 3.8, 7.0, 6.0, 3.7, 8.0, 19.0] ],
    'L2' : [ [ 0.99, 17.0, 28.0, 31.0, 45.0, 60.0, 80.0, 95.1], [0.001, 0.12, 1.4, 0.8, 2.6, 4.1, 6.3, 10.5] ],
    'L3' : [ [ 0.99, 17.0, 28.0, 31.0, 45.0, 60.0, 80.0, 95.1], [0.001, 0.12, 0.55, 0.7, 2.1, 3.5, 5.4, 9.0] ],
    'M1' : [ [ 0.99, 20.0, 28.0, 30.0, 36.0, 53.0, 80.0, 95.1], [0.001, 1.0, 2.9, 2.2, 5.5, 10.0, 22.0, 22.0] ],
    'M2' : [ [ 0.99, 20.0, 22.0, 30.0, 40.0, 68.0, 80.0, 95.1], [0.001, 0.001, 0.5, 2.0, 2.6, 11.0, 15.0, 16.0] ],
    'M3' : [ [ 0.99, 20.0, 22.0, 30.0, 40.0, 68.0, 80.0, 95.1], [0.001, 0.001, 0.5, 2.0, 2.6, 11.0, 10.0, 10.0] ],
    'M4' : [ [ 0.99, 36.0, 40.0, 48.0, 58.0, 76.0, 79.0, 95.1], [0.0006, 0.09, 0.07, 0.48, 1.0, 4.0, 2.7, 4.7] ],
    'M5' : [ [ 0.99, 36.0, 40.0, 48.0, 58.0, 76.0, 79.0, 95.1], [0.0006, 0.09, 0.07, 0.48, 0.87, 2.2, 2.5, 4.3] ],
    'N1' : [ [ 0.99, 30.0, 40.0, 47.0, 50.0, 63.0, 80.0, 95.1], [0.001, 0.001, 6.2, 7.0, 3.2, 12.0, 16.0, 13.0] ],
    'N2' : [ [ 0.99, 40.0, 42.0, 49.0, 54.0, 70.0, 87.0, 95.1], [0.001, 0.001, 1.9, 16.0, 2.7, 13.0, 13.0, 8.0] ],
    'N3' : [ [ 0.99, 40.0, 42.0, 49.0, 54.0, 70.0, 87.0, 95.1], [0.001, 0.001, 1.9, 16.0, 2.7, 13.0, 13.0, 8.0] ],
    'N4' : [ [ 0.99, 40.0, 50.0, 55.0, 60.0, 70.0, 81.0, 95.1], [0.001, 0.001, 0.15, 0.1, 0.8, 8.0, 8.0, 5.0] ],
    'N5' : [ [ 0.99, 40.0, 50.0, 55.0, 60.0, 70.0, 81.0, 95.1], [0.001, 0.001, 0.15, 0.1, 0.8, 8.0, 8.0, 5.0] ],
    'N6' : [ [ 0.99, 71.0, 73.0, 79.0, 86.0, 90.0, 95.0, 100.0], [0.001, 0.001, 0.05, 0.22, 0.1, 0.16, 0.5, 0.9] ],
    'N7' : [ [ 0.99, 71.0, 73.0, 79.0, 86.0, 90.0, 95.0, 100.0], [0.001, 0.001, 0.05, 0.22, 0.1, 0.16, 0.5, 0.9] ]}
def getGamach(Z,EdgeKind):
    """Return the core-hole level width (eV) for atomic number ``Z``.

    The width is obtained by fitting a cubic spline through the tabulated
    ``gamach`` data for the requested edge and evaluating it at ``Z``.

    input:
        Z: atomic number (integer)
        EdgeKind: kind of edge (string), e.g. "K", "L1", "M3"
    output:
        width: core hole width in eV
    example:
        getGamach(40,"L1")
        >> 8.790...."""
    z_knots, width_knots = gamach[EdgeKind]
    spline = interpolate.splrep(z_knots, width_knots, s=0)
    return interpolate.splev(Z, spline)
# Typical covalent bond lengths in angstroms. Key convention: '-' single bond,
# '--' double bond, '---' triple bond.
# BUG FIX: the original dict contained duplicate keys that silently overwrote
# each other: 'C-Cl':1.54 (the C-C single bond), a second 'C--N':1.47 (the C-N
# single bond), a second 'C-Si':2.14 (the C-I bond) and a second 'O-O':1.21
# (the O=O double bond). They are renamed to their intended keys below.
bond_distances={'H-H' :0.74,'H-B' :1.19,'H-C' :1.09,'H-Si':1.48,'H-Ge':1.53,'H-Sn':1.70,'H-N' :1.01,'H-P' :1.44,'H-As':1.52,
                'H-O' :0.96,'H-S' :1.34,'H-Se':1.46,'H-Te':1.70,'H-F' :0.92,'H-Cl':1.27,'H-Br':1.41,'H-I' :1.61,
                'B-Cl' :1.75,
                'C-C':1.54,'C--C':1.34,'C---C':1.20,'C-Si':1.85,'C-Ge':1.95,'C-Sn':2.16,'C-Pb':2.30,'C-N':1.47,'C--N':1.29,'C---N':1.16,
                'C-P':1.84,'C-O':1.43,'C--O':1.20,'C---O':1.13,'C-S':1.82,'C--S':1.60,'C-F':1.35,'C-Cl':1.77,'C-Br':1.94,'C-I':2.14,
                'Si-Si':2.33,'Si-O':1.63,'Si-S':2.00,'Si-F':1.60,'Si-Cl':2.02,'Si-Br':2.15,'Si-I':2.43,
                'Ge-Ge':2.41,'Ge-F':1.68,'Ge-Cl':2.10,'Ge-Br':2.30,
                'Sn-Cl':2.33,'Sn-Br':2.50,'Sn-I':2.70,
                'Pb-Cl':2.42,'Pb-I':2.79,
                'N-N':1.45,'N--N':1.25,'N---N':1.10,'N-O':1.40,'N--O':1.21,'N-F':1.36,'N-Cl':1.75,
                'P-P':2.21,'P-O':1.63,'P--O':1.50,'P--S':1.86,'P-F':1.54,'P-Cl':2.03,
                'As-As':2.43,'As-O':1.78,'As-F':1.71,'As-Cl':2.16,'As-Br':2.33,'As-I':2.54,
                'Sb-Cl':2.32,
                'O-O':1.48,'O--O':1.21,'O-F':1.42,
                'S--O':1.43,'S-S':2.05,'S--S':1.49,'S-F':1.56,'S-Cl':2.07,
                'Se--Se':2.15,
                'F-F':1.42,
                'Cl-Cl':1.99,
                'Br-Br':2.28,
                'I--I':2.67,'I-F':1.91,'I-Cl':2.32}
# Crystal-plane d-spacings (angstroms) for common silicon monochromator reflections.
dspacing = {"Si 111": 3.13467, "Si 311" :1.63702, "Si 511" :1.04514 }
# Characteristic X-ray emission wavelengths (angstroms) for common anode elements.
elem_wave = {'Cr': {'Ka':2.29100, 'Ka1':2.28970, 'Ka2':2.29361, 'Kb':2.08487},
             'Fe': {'Ka':1.93736, 'Ka1':1.93604, 'Ka2':1.93998, 'Kb':1.75661},
             'Co': {'Ka':1.79026, 'Ka1':1.78897, 'Ka2':1.79285, 'Kb':1.62079},
             'Cu': {'Ka':1.54184, 'Ka1':1.54056, 'Ka2':1.54439, 'Kb':1.39222},
             'Mo': {'Ka':0.71073, 'Ka1':0.70930, 'Ka2':0.71359, 'Kb':0.63229}}
| 2.71875 | 3 |
test/test_navmesh.py | WGW101/habitat_sim2real | 0 | 12771219 | <filename>test/test_navmesh.py
import habitat_sim
from habitat_sim.agent import ActionSpec, ActuationSpec
# Simulator configuration: load the test scene and disable sliding so a
# collision stops the agent instead of letting it slide along obstacles.
sim_cfg = habitat_sim.SimulatorConfiguration()
sim_cfg.scene.id = "data/test/4d6ab9cc04f24c87aed802698f957b7a.glb"
sim_cfg.allow_sliding = False
ag_cfg = habitat_sim.agent.AgentConfiguration()
# RGB camera sensor. The position offsets (metres, in the agent frame)
# presumably match a physical camera mount on the real robot -- TODO confirm.
color_spec = habitat_sim.SensorSpec()
color_spec.uuid = "color_sensor"
color_spec.sensor_type = habitat_sim.SensorType.COLOR
color_spec.resolution = [480, 640]
color_spec.position = 0.047 * habitat_sim.geo.FRONT \
        + 0.015 * habitat_sim.geo.LEFT \
        + 0.589 * habitat_sim.geo.UP
color_spec.parameters["hfov"] = "69"
# Agent footprint (metres); presumably sized to the real robot -- TODO confirm.
ag_cfg.radius = 0.18
ag_cfg.height = 0.65
ag_cfg.sensor_specifications = [color_spec]
# Discrete action space: 0.25 m steps, 10 degree turns, 15 degree tilts.
ag_cfg.action_space = {k: ActionSpec(k, ActuationSpec(amount=v))
                       for k,v in (("move_forward", 0.25),
                                   ("turn_left", 10.0),
                                   ("turn_right", 10.0),
                                   ("look_up", 15.0),
                                   ("look_down", 15.0))}
cfg = habitat_sim.Configuration(sim_cfg, [ag_cfg])
sim = habitat_sim.Simulator(cfg)
# Recompute the navmesh with default settings and persist it next to the scene.
navmesh_settings = habitat_sim.NavMeshSettings()
navmesh_settings.set_defaults()
sim.recompute_navmesh(sim.pathfinder, navmesh_settings, include_static_objects=False)
if sim.pathfinder.is_loaded:
    sim.pathfinder.save_nav_mesh(sim_cfg.scene.id.replace(".glb", ".navmesh"))
| 2.03125 | 2 |
simulator/opendc-experiments/opendc-experiments-allocateam/tools/plot/metrics/job_makespan.py | allocateam/opendc | 0 | 12771220 | from .metric import Metric, metric_path
import pandas as pd
import math
class JobMakespanMetric(Metric):
    """Per-job makespan: seconds from a job's first task submission until the
    completion of its last task."""

    def __init__(self, plot, scenarios):
        super().__init__(plot, scenarios)
        self.name = "job_makespan"
        self.x_axis_label = "Job makespan (seconds)"

    def get_data(self, scenario):
        """Yield the makespan (whole seconds) of every job in the scenario.

        Jobs without tasks (which produced a NaN makespan before) are skipped.
        """
        job_df = pd.read_parquet(metric_path("job-lifecycle", scenario))
        task_df = pd.read_parquet(metric_path("task-lifecycle", scenario))

        # PERF: aggregate once with groupby instead of filtering task_df for
        # every job id, which was O(jobs * tasks).
        bounds = task_df.groupby('job_id').agg(
            first_submission=('submission_time', 'min'),
            last_finish=('finish_time', 'max'),
        )

        for job_id in job_df.job_id.unique():
            if job_id not in bounds.index:
                # No tasks for this job: equivalent to the NaN-skip before.
                continue
            row = bounds.loc[job_id]
            # Timestamps are in milliseconds; convert to whole seconds.
            makespan = (row.last_finish - row.first_submission) // 1000
            if math.isnan(makespan):
                continue
            yield makespan
| 2.796875 | 3 |
src/monitors/optim_monitors.py | isaachenrion/jets | 9 | 12771221 | <filename>src/monitors/optim_monitors.py
import torch
from .meta import Collect
class GradNorm(Collect):
    """Collects the L2 norm of the flattened gradient vector."""

    def __init__(self, **kwargs):
        super().__init__('grad_norm', fn='last', **kwargs)

    def call(self, grads=None, **kwargs):
        # `grads` is a flat tensor containing every parameter gradient.
        norm_value = torch.norm(grads)
        super().call(grad_norm=norm_value)
        return self.value
class ParamNorm(Collect):
    """Collects the L2 norm of the flattened parameter vector."""

    def __init__(self, **kwargs):
        super().__init__('param_norm', fn='last', **kwargs)

    def call(self, model_params=None, **kwargs):
        # `model_params` is a flat tensor containing every model parameter.
        norm_value = torch.norm(model_params)
        super().call(param_norm=norm_value)
        return self.value
class GradVariance(Collect):
    """Collects the variance of the flattened gradient vector."""

    def __init__(self, **kwargs):
        super().__init__('grad_variance', fn='last', **kwargs)

    def call(self, grads=None, **kwargs):
        # `grads` is a flat tensor containing every parameter gradient.
        variance = torch.var(grads)
        super().call(grad_variance=variance)
        return self.value
class ParamVariance(Collect):
    """Collects the variance of the flattened parameter vector."""

    def __init__(self, **kwargs):
        super().__init__('param_variance', fn='last', **kwargs)

    def call(self, model_params=None, **kwargs):
        # `model_params` is a flat tensor containing every model parameter.
        variance = torch.var(model_params)
        super().call(param_variance=variance)
        return self.value
class UpdateRatio(Collect):
    """Collects ||new_params - old_params|| / ||new_params||.

    A rough per-step signal of how large the optimizer update is relative to
    the current parameter magnitude.
    """

    def __init__(self, **kwargs):
        super().__init__('update_ratio', fn='last', **kwargs)

    def call(self, model_params=None, old_params=None, **kwargs):
        delta = model_params - old_params
        ratio = torch.norm(delta) / torch.norm(model_params)
        super().call(update_ratio=ratio)
        return self.value
| 2.234375 | 2 |
tests/test_app.py | jackwardell/Flask-Model-Management | 2 | 12771222 | <filename>tests/test_app.py
import pytest
from flask import Flask
from flask_model_management.domain import CRUD_OPERATIONS
from flask_model_management.manager import ModelManager as ModelManagement
from tests.models import Address
from tests.models import db
from tests.models import populate
from tests.models import User
# (model, column-name list) pairs used to parametrize the CRUD page tests.
MODELS_AND_COLUMNS = [
    (model, [col.name for col in model.__table__.columns]) for model in (Address, User)
]
@pytest.fixture(scope="function")
def client_factory(sqlalchemy_url):
    """Build Flask test clients wired up with the model-management extension.

    The returned factory optionally registers one model (forwarding extra
    keyword arguments to ``register_model``), creates the schema, seeds it,
    and hands back a test client.
    """

    def factory(model=None, **kwargs):
        management = ModelManagement()

        app = Flask(__name__)
        app.config.update(
            SECRET_KEY="hello world",
            SQLALCHEMY_DATABASE_URI=sqlalchemy_url,
        )
        db.init_app(app)

        if model is not None:
            management.register_model(model, **kwargs)
        management.init_app(app, db=db)

        with app.app_context():
            db.create_all()
            populate(db.session)

        return app.test_client()

    return factory
def test_app(client_factory):
    """The model-management index page should be reachable."""
    client = client_factory()
    response = client.get("/model-management/")
    assert response.status_code == 200
@pytest.mark.parametrize("operation", CRUD_OPERATIONS)
@pytest.mark.parametrize("models_and_columns", MODELS_AND_COLUMNS)
def test_operation(client_factory, operation, models_and_columns):
    """Every CRUD page should render and mention each model column."""
    model, columns = models_and_columns
    client = client_factory(model)

    response = client.get(f"/model-management/{model.__tablename__}/{operation}")
    assert response.status_code == 200

    body = response.data.decode()
    for column in columns:
        assert column in body
# @pytest.mark.parametrize("operation", CRUD_OPERATIONS)
# @pytest.mark.parametrize("models_and_columns", MODELS_AND_COLUMNS)
# def test_operation_excluded(client_factory, operation, models_and_columns):
# model, columns = models_and_columns
#
# client = client_factory(model, excluded_operations=[operation])
# resp = client.get(f"/model-management/{model.__tablename__}/{operation}")
# assert resp.status_code == 404
#
# for crud_operation in CRUD_OPERATIONS - {operation}:
# resp = client.get(f"/model-management/{model.__tablename__}/{crud_operation}")
# assert resp.status_code == 200
# @pytest.mark.parametrize("operation", CRUD_OPERATIONS)
# @pytest.mark.parametrize("models_and_columns", MODELS_AND_COLUMNS)
# def test_column_excluded(client_factory, operation, models_and_columns):
# model, columns = models_and_columns
#
# for col in columns:
# client = client_factory(model, excluded_columns=[col])
# resp = client.get(f"/model-management/{model.__tablename__}/{operation}")
# assert resp.status_code == 200
#
# assert col not in resp.data.decode()
# # for c in columns:
# # if c != col:
# # assert c in resp.data.decode()
| 2.53125 | 3 |
model.py | rethinkdb/rethinkdb-example-webpy-blog | 16 | 12771223 | <gh_stars>10-100
# The [web.py](http://webpy.org/) (really basic) [blog example](http://webpy.org/src/blog/0.3)
# using **RethinkDB as the backend for web.py applications**.
#
# For details about the complete stack, installation, and running the app see
# the [README](https://github.com/rethinkdb/rethinkdb-example-webpy-blog).
import os, socket, sys, time
import web
from contextlib import contextmanager
import rethinkdb as r
from rethinkdb.errors import RqlRuntimeError
#### Connection details
# We will use these settings later in the code to
# connect to the RethinkDB server.
RDB_CONFIG = {
    'host' : os.getenv('RDB_HOST', 'localhost'),   # RethinkDB server host
    'port' : os.getenv('RDB_PORT', 28015),         # RethinkDB driver port
    'db'   : os.getenv('RDB_DB', 'webpy'),         # application database
    'table': os.getenv('RDB_TABLE', 'blogposts')   # blog posts table
}
# The `Connection` object returned by [`r.connect`](http://www.rethinkdb.com/api/python/connect/)
# is a [context manager](http://docs.python.org/2/library/stdtypes.html#typecontextmanager)
# that can be used with the `with` statements.
def connection():
    """Open a new connection to the configured RethinkDB server/database."""
    return r.connect(
        host=RDB_CONFIG['host'],
        port=RDB_CONFIG['port'],
        db=RDB_CONFIG['db'],
    )
#### Listing existing posts
# To retrieve all existing tasks, we are using the
# [`r.table`](http://www.rethinkdb.com/api/python/table/)
# command to query the database in response to a GET request from the
# browser. We also [`order_by`](http://www.rethinkdb.com/api/python/order_by/)
# the `posted_at` attribute in a descending manner.
#
# Running the query returns an iterator that automatically streams
# data from the server in efficient batches.
def get_posts():
    """Return all blog posts, newest first."""
    query = r.table(RDB_CONFIG['table']).order_by(r.desc('posted_at'))
    with connection() as conn:
        return query.run(conn)
#### Creating a new post
# We create a new blog entry using
# [`insert`](http://www.rethinkdb.com/api/python/insert/).
#
# The `insert` operation returns a single object specifying the number
# of successfully created objects and their corresponding IDs:
#
# ```
# {
# "inserted": 1,
# "errors": 0,
# "generated_keys": [
# "<KEY>"
# ]
# }
# ```
def new_post(title, text):
    """Insert a new blog post.

    Returns the post dict (including its server-generated ``id``) on success,
    or None when the insert did not create exactly one row.
    """
    # Take a single timestamp so posted_at and last_modified are identical at
    # creation time (previously two time.time() calls could differ slightly).
    now = time.time()
    post = {'title': title,
            'content': text,
            'posted_at': now,
            'last_modified': now
            }
    with connection() as conn:
        result = r.table(RDB_CONFIG['table']).insert(post).run(conn)
    if result['inserted'] == 1:
        post['id'] = result['generated_keys'][0]
        return post
    return None
#### Retrieving a single post
# Every new post gets assigned a unique ID. The browser can retrieve
# a specific task by GETing `/view/<post_id>`. To query the database
# for a single document by its ID, we use the
# [`get`](http://www.rethinkdb.com/api/python/get/)
# command.
def get_post(id):
    """Fetch a single post by its primary key (None if it does not exist)."""
    table = r.table(RDB_CONFIG['table'])
    with connection() as conn:
        return table.get(id).run(conn)
#### Updating a post
# To update the post we'll use the
# [`update`](http://www.rethinkdb.com/api/python/update/)
# command, which will merge the JSON object stored in the database with the
# new one.
#
# The `update` operation returns an object specifying how many rows
# have been updated.
def update_post(id, title, text):
    """Update a post's title and content; True when exactly one row changed."""
    changes = {'title': title,
               'content': text,
               'last_modified': time.time()}
    with connection() as conn:
        result = r.table(RDB_CONFIG['table']).get(id).update(changes).run(conn)
    # NOTE(review): this checks the 'modified' result key; newer RethinkDB
    # drivers report 'replaced'/'unchanged' instead -- confirm against the
    # driver version in use.
    return result.get('modified', 0) == 1
#### Deleting a post
# To delete a post we'll call a
# [`delete`](http://www.rethinkdb.com/api/python/delete/)
# command.
#
# The `delete` operation returns an object specifying how many
# rows have been deleted.
def del_post(id):
    """Delete a post by its primary key; True when exactly one row was removed."""
    with connection() as conn:
        result = r.table(RDB_CONFIG['table']).get(id).delete().run(conn)
    return result.get('deleted', 0) == 1
#### Database setup
# The app will use the table `blogposts` in the database `webpy`.
# You can override these defaults by defining the `RDB_DB` and `RDB_TABLE`
# env variables.
#
# We'll create the database and table here using
# [`db_create`](http://www.rethinkdb.com/api/python/db_create/)
# and
# [`table_create`](http://www.rethinkdb.com/api/python/table_create/)
# commands.
def dbSetup():
    """Create the application's database and table if they do not exist yet."""
    conn = r.connect(host=RDB_CONFIG['host'], port=RDB_CONFIG['port'])
    try:
        r.db_create(RDB_CONFIG['db']).run(conn)
        r.db(RDB_CONFIG['db']).table_create(RDB_CONFIG['table']).run(conn)
        print('Database setup completed. Now run the app without --setup.')
    except RqlRuntimeError:
        # BUG FIX: the original `print (a, b)` printed a tuple repr under
        # Python 2; a single string renders correctly on both Python 2 and 3.
        print('App database already exists. Run the app like this: python blog.py')
    finally:
        conn.close()
# ### Best practices ###
#
# #### Managing connections: a connection per request ####
#
# The RethinkDB server doesn't use a thread-per-connection approach,
# so opening a connection per request will not slow down your database.
#
# #### Fetching multiple rows: batched iterators ####
#
# When fetching multiple rows from a table, RethinkDB returns a
# batched iterator initially containing a subset of the complete
# result. Once the end of the current batch is reached, a new batch is
# automatically retrieved from the server. From a coding point of view
# this is transparent:
#
# for result in r.table('blogposts').run(conn):
# print result
#
#
# #### `update` vs `replace` ####
#
# Both [`update`](http://www.rethinkdb.com/api/python/update/) and
# [`replace`](http://www.rethinkdb.com/api/python/replace/)
# operations can be used to modify one or multiple rows. Their behavior is different:
#
# * `update` will merge existing rows with the new values
# * `replace` will completely replace the existing rows with new values
#
# Licensed under the MIT license: <http://opensource.org/licenses/mit-license.php>
#
# Copyright (c) 2012 RethinkDB
#
| 2.75 | 3 |
demo/demo.py | chupati/stanfordgap | 0 | 12771224 | from sg.StanfordGap import StanfordGap
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import datasets
class StanfordGapDemo(object):
    """Demo of the Stanford Gap statistic on the scikit-learn iris data set."""

    def run(self):
        """
        Run the Stanford Gap Statistic Analysis on the iris data set presented in
        http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_iris.html#sphx-glr-auto-examples-cluster-plot-cluster-iris-py
        """
        np.random.seed(42)

        iris = datasets.load_iris()
        X = iris.data

        gaps = np.zeros((20, 1))
        s = np.zeros((20, 1))
        # Fit K-means for k = 1..20 and compute Gap(k) and its deviation s_k.
        for k in range(0, 20):
            est = KMeans(n_clusters=(k + 1))
            est.fit(X)
            sg = StanfordGap(B=10)
            sg.fit(X, est.labels_, est.cluster_centers_)
            gaps[k] = sg.gap
            s[k] = sg.s

        # Plot Gap(k).
        # Choose the smallest k such that Gap(k) >= Gap(k+1) - s_(k+1)
        plt.plot(gaps[0:18])
        plt.plot(gaps[1:19] - s[1:19])
        plt.legend(['Gap(k)', 'Gap(k+1) - s_k+1'])
        # BUG FIX: 20 tick positions need 20 labels; np.arange(1, 20) only
        # produced 19 labels, which raises ValueError on modern matplotlib.
        plt.xticks(np.arange(20), np.arange(1, 21))
        plt.xlabel('K')
        plt.show()
| 3.078125 | 3 |
test/py/inters/test16.py | Ahdhn/lar-cc | 1 | 12771225 | <gh_stars>1-10
""" Generating the LAR of a set of non-intersecting cycles """
from larlib import *
sys.path.insert(0, 'test/py/inters/')
from test15 import *
# 2-cells of the arrangement induced by the cycles computed in test15.
cells = cellsFromCycles(latticeArray)
# CV: for each cycle, the deduplicated list of its vertex indices.
# NOTE(review): AA/COMP/CAT are larlib FL-style combinators (map, compose,
# concatenate); semantics assumed from larlib -- confirm.
CV = AA(COMP([list,set,CAT]))(EVs)
# Map each edge (vertex pair) to its index in EV.
EVdict = dict(zip(EV,range(len(EV))))
# FE: for each cycle, the list of its edge indices.
FE = [[EVdict[edge] for edge in cycle] for cycle in EVs]
# edges: for each cell, the concatenated edge indices of its boundary cycles.
edges = [CAT([FE[cycle] for cycle in cell]) for cell in cells]
# FV: for each cell, the concatenated vertex indices of its cycles.
FVs = [[CV[cycle] for cycle in cell] for cell in cells]
FV = AA(CAT)(FVs)
n = len(cells)
# Enumerate every binary chain (subset of cells); name typo is in test15's API.
chains = allBinarySubsetsOfLenght(n)
cycles = STRUCT(MKPOLS((V,EV)))
# Boundary operator (faces -> edges) as a sparse matrix.
csrBoundaryMat = larBoundary(FV,EV)
# For every chain, display its boundary (in red) over all cycles.
for chain in chains:
    chainBoundary = COLOR(RED)(STRUCT(MKPOLS((V,[EV[e]
        for e in chain2BoundaryChain(csrBoundaryMat)(chain)]))))
    VIEW(STRUCT([cycles, chainBoundary]))
| 2.3125 | 2 |
epson_projector/main.py | xWizard360/epson_projector | 0 | 12771226 | <reponame>xWizard360/epson_projector<filename>epson_projector/main.py
"""Main of Epson projector module."""
import logging
from .const import (BUSY, TIMEOUT_TIMES)
from .lock import Lock
_LOGGER = logging.getLogger(__name__)
class Projector:
    """
    Epson projector class.

    Control your projector with Python: a single async facade over three
    interchangeable transports (HTTP, TCP, serial).
    """
    def __init__(self, host, websession=None, type='http', port=80,
                 encryption=False, loop=None, timeout_scale=1.0):
        """
        Epson Projector controller.

        :param str host: Hostname/IP/serial to the projector
        :param obj websession: Websession to pass for HTTP protocol
        :param str type: Transport to use: 'http', 'tcp' or 'serial'
        :param int port: Port to connect to if using HTTP/TCP connection
        :param bool encryption: Use encryption to connect, only for HTTP.
        :param obj loop: Asyncio loop to pass for TCP/serial connection
        :param timeout_scale: Factor to multiply default timeouts by
            (for slow projectors)
        :raises ValueError: if ``type`` is not a supported transport
        """
        self._lock = Lock()
        self._type = type
        self._timeout_scale = timeout_scale
        self._host = host
        if self._type == 'http':
            from .projector_http import ProjectorHttp
            self._projector = ProjectorHttp(host, websession,
                                            port, encryption)
        elif self._type == 'tcp':
            from .projector_tcp import ProjectorTcp
            self._projector = ProjectorTcp(host, port, loop)
        elif self._type == 'serial':
            from .projector_serial import ProjectorSerial
            self._projector = ProjectorSerial(host, loop)
        else:
            # Fail fast: previously an unknown type silently left
            # self._projector unset, causing AttributeError on first use.
            raise ValueError("Unsupported connection type: %r" % type)

    def close(self):
        """Close connection. Not used in HTTP"""
        self._projector.close()

    def set_timeout_scale(self, timeout_scale=1.0):
        """Set the multiplier applied to every command timeout."""
        self._timeout_scale = timeout_scale

    async def get_property(self, command):
        """Get property state from device.

        Returns BUSY when another command currently holds the lock.
        """
        _LOGGER.debug("Getting property %s", command)
        if self._lock.checkLock():
            return BUSY
        return await self._projector.get_property(command,
                                                  self.__get_timeout(command))

    async def send_command(self, command):
        """Send command to Epson.

        Returns False (without sending) when the projector is busy.
        """
        _LOGGER.debug("Sending command to projector %s", command)
        if self._lock.checkLock():
            return False
        self._lock.setLock(command)
        return await self._projector.send_command(command,
                                                  self.__get_timeout(command))

    def __get_timeout(self, command):
        # Per-command timeout when configured, otherwise the 'ALL' default,
        # scaled by the user-supplied factor.
        return TIMEOUT_TIMES.get(command, TIMEOUT_TIMES['ALL']) * self._timeout_scale
| 2.78125 | 3 |
ds/beis_indicators/travel/make_travel_work.py | nestauk/beis-indicators | 4 | 12771227 | import logging
import numpy as np
import glob
from beis_indicators import project_dir
from beis_indicators.geo import NutsCoder, LepCoder
from beis_indicators.indicators import points_to_indicator, save_indicator
from beis_indicators.travel.travel_work_processing import get_travel_work_data
import pandas as pd
logger = logging.getLogger(__name__)

coders = {
    'nuts2': NutsCoder(level=2),
    'nuts3': NutsCoder(level=3),
    'lep': LepCoder(),
}

# Download/refresh the raw travel-to-work data, then load the combined file.
get_travel_work_data()
destination_df = pd.read_csv(f'{project_dir}/data/interim/travel_to_work_all_years.csv')

for geo, coder in coders.items():
    indicator = points_to_indicator(
        destination_df, value_col='Mean', coder=coder,
        aggfunc=np.mean, value_rename='Mean',
        projection='EPSG:4326', x_col='long', y_col='lat')

    # Both geographies share the same post-processing; only the id column differs.
    id_col = 'lep_id' if geo == 'lep' else 'nuts_id'
    indicator = (indicator
                 .rename(columns={'Mean': 'travel_time_to_work'})
                 .sort_values([id_col, 'year'])
                 .reset_index(drop=True))

    save_indicator(indicator, 'travel', geo)
| 2.28125 | 2 |
snake4.py | JoaoBatistaJr/Pygame-SnakeGame | 0 | 12771228 | <gh_stars>0
import pygame, random
from pygame.locals import *
def on_grid_random():
    """Return a random (x, y) position aligned to the 10px grid.

    BUG FIX: the window is 540x480, so valid cell indices are 0..53 for x and
    0..47 for y; the previous randint(0, 59) could place the apple off-screen
    where the snake can never reach it.
    """
    x = random.randint(0, 53)
    y = random.randint(0, 47)
    return (x * 10, y * 10)
def collision(c1, c2):
    """Return True when two grid positions share the same (x, y) coordinates."""
    same_x = c1[0] == c2[0]
    same_y = c1[1] == c2[1]
    return same_x and same_y
# Direction constants.
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
# Colour definitions (variable names are Portuguese: BRANCO=white, VERMELHO=red).
BRANCO = pygame.Color('white')
VERMELHO = pygame.Color('red')
# Initialisation: 540x480 window with a 10px cell grid.
pygame.init()
screen = pygame.display.set_mode((540,480))
pygame.display.set_caption('Snake')
# Game entities: the snake starts as three segments, head first.
snake = [(200, 200), (210, 200), (220, 200)]
snake_skin = pygame.Surface((10,10))
snake_skin.fill(BRANCO) # white
apple_pos = on_grid_random()
apple = pygame.Surface((10,10))
apple.fill(VERMELHO) # red
# Initial movement direction.
direcao = LEFT
# FPS limiter.
clock = pygame.time.Clock()
# Font and score.
font = pygame.font.Font('freesansbold.ttf', 20)
score = 0
# Main game loop.
game_over = False
while not game_over:
    clock.tick(20)  # cap at 20 FPS; this controls the snake's speed
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            exit()
        # Keyboard mapping: arrow keys; reversing direction is not allowed.
        if event.type == KEYDOWN:
            if event.key == K_UP and direcao != DOWN:
                direcao = UP
            if event.key == K_DOWN and direcao != UP:
                direcao = DOWN
            if event.key == K_RIGHT and direcao != LEFT:
                direcao = RIGHT
            if event.key == K_LEFT and direcao != RIGHT:
                direcao = LEFT
    # Apple collision: respawn the apple, grow the snake, bump the score.
    if collision(snake[0], apple_pos):
        apple_pos = on_grid_random()
        snake.append((0,0))
        score = score + 1
    # Wall collision ends the game (the head moves in 10px steps).
    if snake[0][0] == 540 or snake[0][1] == 480 or snake[0][0] < 0 or snake[0][1] < 0:
        game_over = True
        break
    # Self collision: head against any body segment.
    # NOTE(review): range(1, len(snake)-1) skips the last segment -- presumably
    # to avoid a false positive with the (0,0) segment appended on eating, but
    # it also misses a genuine collision with the tail tip; confirm intent.
    for i in range(1, len(snake) -1):
        if snake[0][0] == snake[i][0] and snake[0][1] == snake[i][1]:
            game_over = True
            break
    if game_over:
        break
    # Shift the body: each segment moves into its predecessor's cell.
    for i in range(len(snake) - 1, 0, -1):
        snake[i] = (snake[i-1][0], snake[i-1][1])
    # Move the head one cell in the current direction.
    if direcao == UP:
        snake[0] = (snake[0][0], snake[0][1] - 10)
    if direcao == DOWN:
        snake[0] = (snake[0][0], snake[0][1] + 10)
    if direcao == RIGHT:
        snake[0] = (snake[0][0] + 10, snake[0][1])
    if direcao == LEFT:
        snake[0] = (snake[0][0] - 10, snake[0][1])
    # Clear the screen.
    screen.fill((0,0,0))
    # Draw the apple.
    screen.blit(apple, apple_pos)
    # Draw the background grid.
    for x in range(0, 540, 10): # vertical lines
        pygame.draw.line(screen, (40,40,40), (x,0), (x, 540))
    for y in range(0, 540, 10): # horizontal lines
        pygame.draw.line(screen, (40,40,40), (0,y), (540,y))
    # Draw the score in the top-right corner.
    score_font = font.render('Score: %s' %(score), True, (BRANCO))
    score_rect = score_font.get_rect()
    score_rect.topleft = (540 - 120, 10)
    screen.blit(score_font, score_rect)
    for pos in snake:
        # Draw each snake segment.
        screen.blit(snake_skin,pos)
    # Flip the frame to the display.
    pygame.display.update()
# Game-over screen: draw the banner, then block until the window is closed.
while True:
    game_over_font = pygame.font.Font('freesansbold.ttf', 75)
    game_over_screen = game_over_font.render('Game Over', True, (BRANCO))
    game_over_rect = game_over_screen.get_rect()
    game_over_rect.midtop = (540 / 2, 10)
    screen.blit(game_over_screen, game_over_rect)
    pygame.display.update()
    pygame.time.wait(500)
    # Inner loop polls events forever; only QUIT breaks out (via exit()).
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
exit() | 3.25 | 3 |
ddtrace/internal/module.py | ysk24ok/dd-trace-py | 0 | 12771229 | from collections import defaultdict
from os.path import abspath
from os.path import expanduser
from os.path import isdir
from os.path import isfile
from os.path import join
import sys
from types import ModuleType
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Union
from typing import cast
from ddtrace.internal.compat import PY2
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils import get_argument_value
log = get_logger(__name__)
# Signature shared by all module hooks: each receives the loaded module.
ModuleHookType = Callable[[ModuleType], None]
# Original runpy._run_code, captured when patched (None until _patch_run_code runs).
_run_code = None
# Hooks invoked after a module is executed via runpy.
_post_run_module_hooks = []  # type: List[ModuleHookType]
def _wrapped_run_code(*args, **kwargs):
    # type: (*Any, **Any) -> Dict[str, Any]
    """Wrapper around ``runpy._run_code`` that fires post-run module hooks."""
    global _run_code, _post_run_module_hooks

    # DEV: If we are calling this wrapper then _run_code must have been set to
    # the original runpy._run_code.
    assert _run_code is not None

    mod_name = get_argument_value(args, kwargs, 3, "mod_name")

    try:
        return _run_code(*args, **kwargs)
    finally:
        # BUG FIX: use .get() so that a failed module run (module missing from
        # sys.modules) does not raise KeyError here and mask the original
        # exception coming out of _run_code.
        module = sys.modules.get(mod_name)
        if module is not None:
            for hook in _post_run_module_hooks:
                hook(module)
def _patch_run_code():
    # type: () -> None
    """Monkey-patch ``runpy._run_code`` with the hook-firing wrapper (idempotent)."""
    global _run_code

    if _run_code is not None:
        # Already patched.
        return

    import runpy

    _run_code = runpy._run_code  # type: ignore[attr-defined]
    runpy._run_code = _wrapped_run_code  # type: ignore[attr-defined]
def register_post_run_module_hook(hook):
    # type: (ModuleHookType) -> None
    """Register a post run module hook.

    The hooks gets called after the module is loaded. For this to work, the
    hook needs to be registered during the interpreter initialization, e.g. as
    part of a sitecustomize.py script.
    """
    # Ensure runpy is patched before the first hook is added; no-op afterwards.
    _patch_run_code()
    _post_run_module_hooks.append(hook)
def unregister_post_run_module_hook(hook):
    # type: (ModuleHookType) -> None
    """Unregister a post run module hook.

    If the hook was not registered, a ``ValueError`` exception is raised.
    """
    # list.remove raises ValueError for unknown hooks, as documented.
    _post_run_module_hooks.remove(hook)
def origin(module):
    # type: (ModuleType) -> str
    """Return the absolute path of the file a module was loaded from.

    Falls back to the module spec when ``__file__`` is unavailable, and to
    the sentinel ``"<unknown origin>"`` when no on-disk file can be found.
    """
    module_path = None
    try:
        module_path = abspath(module.__file__)  # type: ignore[type-var]
    except (AttributeError, TypeError):
        # Module is probably only partially initialised; try its spec instead.
        try:
            module_path = abspath(module.__spec__.origin)  # type: ignore
        except (AttributeError, ValueError, TypeError):
            module_path = None

    if module_path is not None and isfile(module_path):
        # Map compiled files back to their source counterpart.
        return module_path[:-1] if module_path.endswith(".pyc") else module_path

    return "<unknown origin>"
def _resolve(path):
    # type: (str) -> Optional[str]
    """Resolve a (possibly relative) file path against ``sys.path`` entries.

    Returns the first matching absolute file path, or None when the path
    cannot be found under any ``sys.path`` directory.
    """
    expanded = expanduser(path)
    for entry in sys.path:
        if not isdir(entry):
            continue
        candidate = abspath(join(entry, expanded))
        if isfile(candidate):
            return candidate
    return None
# Borrowed from the wrapt module
# https://github.com/GrahamDumpleton/wrapt/blob/df0e62c2740143cceb6cafea4c306dae1c559ef8/src/wrapt/importer.py
if PY2:
    # Python 2 has no importlib.util.find_spec or ModuleSpec; Loader degrades
    # to a plain object base class.
    find_spec = ModuleSpec = None
    Loader = object
else:
    from importlib.abc import Loader
    from importlib.machinery import ModuleSpec
    from importlib.util import find_spec
# DEV: This is used by Python 2 only
class _ImportHookLoader(object):
    """PEP 302 loader shim that fires a callback after a module is imported.

    By the time ``load_module`` runs, the module is already present in
    ``sys.modules``; we simply look it up and notify the callback.
    """

    def __init__(self, callback):
        # type: (Callable[[ModuleType], None]) -> None
        self.callback = callback

    def load_module(self, fullname):
        # type: (str) -> ModuleType
        loaded = sys.modules[fullname]
        self.callback(loaded)
        return loaded
class _ImportHookChainedLoader(Loader):
    """Wraps another loader and fires a callback once a module is executed.

    DEV: ``load_module`` is deprecated, so the public methods are only bound
    at runtime when the wrapped loader provides them; this shim then mirrors
    the capabilities of the underlying loader for both the legacy protocol
    and the modern create/exec protocol.
    """

    def __init__(self, loader, callback):
        # type: (Loader, Callable[[ModuleType], None]) -> None
        self.loader = loader
        self.callback = callback

        for public_name, shim in (
            ("load_module", self._load_module),
            ("create_module", self._create_module),
            ("exec_module", self._exec_module),
        ):
            if hasattr(loader, public_name):
                setattr(self, public_name, shim)

    def _load_module(self, fullname):
        # type: (str) -> ModuleType
        loaded = self.loader.load_module(fullname)
        self.callback(loaded)
        return loaded

    def _create_module(self, spec):
        return self.loader.create_module(spec)

    def _exec_module(self, module):
        self.loader.exec_module(module)
        self.callback(sys.modules[module.__name__])

    def get_code(self, mod_name):
        return self.loader.get_code(mod_name)
class ModuleWatchdog(dict):
"""Module watchdog.
Replace the standard ``sys.modules`` dictionary to detect when modules are
loaded/unloaded. This is also responsible for triggering any registered
import hooks.
Subclasses might customize the default behavior by overriding the
``after_import`` method, which is triggered on every module import, once
the subclass is installed.
"""
_instance = None # type: Optional[ModuleWatchdog]
    def __init__(self):
        # type: () -> None
        """Snapshot the current ``sys.modules`` state and prepare hook maps."""
        # Hooks keyed by module origin path or module name.
        self._hook_map = defaultdict(list)  # type: DefaultDict[str, List[ModuleHookType]]
        # Map each already-imported module's origin file to the module object.
        self._origin_map = {origin(module): module for module in sys.modules.values()}
        self._modules = sys.modules  # type: Union[dict, ModuleWatchdog]
        # Module names currently being resolved, to break finder recursion.
        self._finding = set()  # type: Set[str]
    def __getitem__(self, item):
        # type: (str) -> ModuleType
        # Delegate lookups to the wrapped sys.modules mapping.
        return self._modules.__getitem__(item)
    def __setitem__(self, name, module):
        # type: (str, ModuleType) -> None
        # Delegate stores to the wrapped sys.modules mapping.
        self._modules.__setitem__(name, module)
    def _add_to_meta_path(self):
        # type: () -> None
        # Prepend so this finder sees every import before the default finders.
        sys.meta_path.insert(0, self)  # type: ignore[arg-type]
    @classmethod
    def _find_in_meta_path(cls):
        # type: () -> Optional[int]
        """Return the index of this class's finder in ``sys.meta_path``, or None."""
        for i, meta_path in enumerate(sys.meta_path):
            if type(meta_path) is cls:
                return i
        return None
    @classmethod
    def _remove_from_meta_path(cls):
        # type: () -> None
        """Remove this class's finder from ``sys.meta_path``, if present."""
        i = cls._find_in_meta_path()
        if i is not None:
            sys.meta_path.pop(i)
    def after_import(self, module):
        # type: (ModuleType) -> None
        """Record the module's origin and fire any hooks registered for it."""
        path = origin(module)
        self._origin_map[path] = module
        # Collect all hooks by module origin and name
        hooks = []
        if path in self._hook_map:
            hooks.extend(self._hook_map[path])
        if module.__name__ in self._hook_map:
            hooks.extend(self._hook_map[module.__name__])
        if hooks:
            log.debug("Calling %d registered hooks on import of module '%s'", len(hooks), module.__name__)
            for hook in hooks:
                hook(module)
    @classmethod
    def get_by_origin(cls, origin):
        # type: (str) -> Optional[ModuleType]
        """Lookup a module by its origin."""
        # NOTE(review): _check_installed is defined outside this view; it
        # presumably raises when the watchdog is not installed -- confirm.
        cls._check_installed()
        path = _resolve(origin)
        if path is not None:
            return cls._instance._origin_map.get(path)  # type: ignore[union-attr]
        return None
    def __delitem__(self, name):
        # type: (str) -> None
        """Delete a module, also dropping any cached origin reference to it."""
        try:
            path = origin(sys.modules[name])
            # Drop the module reference to reclaim memory
            del self._origin_map[path]
        except KeyError:
            # Module (or its origin) was never tracked; nothing to clean up.
            pass
        self._modules.__delitem__(name)
    def __getattribute__(self, name):
        # type: (str) -> Any
        # Prefer attributes of the wrapped sys.modules object (so dict methods
        # behave natively); fall back to this instance's own attributes.
        try:
            return super(ModuleWatchdog, self).__getattribute__("_modules").__getattribute__(name)
        except AttributeError:
            return super(ModuleWatchdog, self).__getattribute__(name)
    def __contains__(self, name):
        # type: (object) -> bool
        """Support ``name in sys.modules`` style checks via the wrapped mapping."""
        return self._modules.__contains__(name)
    def __len__(self):
        # type: () -> int
        """Return the number of entries in the wrapped modules mapping."""
        return self._modules.__len__()
    def __iter__(self):
        # type: () -> Iterator
        """Iterate over the keys (module names) of the wrapped mapping."""
        return self._modules.__iter__()
    def find_module(self, fullname, path=None):
        # type: (str, Optional[str]) -> Union[ModuleWatchdog, _ImportHookChainedLoader, None]
        """Legacy finder protocol: wrap the real loader so import hooks fire."""
        if fullname in self._finding:
            # Re-entrant lookup triggered by the find_spec call below; defer to
            # the next finder on sys.meta_path.
            return None
        self._finding.add(fullname)
        try:
            if PY2:
                __import__(fullname)
                return _ImportHookLoader(self.after_import)
            loader = getattr(find_spec(fullname), "loader", None)
            if loader and not isinstance(loader, _ImportHookChainedLoader):
                return _ImportHookChainedLoader(loader, self.after_import)
        finally:
            self._finding.remove(fullname)
        return None
    def find_spec(self, fullname, path=None, target=None):
        # type: (str, Optional[str], Optional[ModuleType]) -> Optional[ModuleSpec]
        """PEP 451 finder protocol: chain the spec's loader so import hooks fire."""
        if fullname in self._finding:
            # Re-entrant lookup triggered by the find_spec call below; defer to
            # the next finder on sys.meta_path.
            return None
        self._finding.add(fullname)
        try:
            spec = find_spec(fullname)
            if spec is None:
                return None
            loader = getattr(spec, "loader", None)
            if loader and not isinstance(loader, _ImportHookChainedLoader):
                spec.loader = _ImportHookChainedLoader(loader, self.after_import)
            return spec
        finally:
            self._finding.remove(fullname)
    @classmethod
    def register_origin_hook(cls, origin, hook):
        # type: (str, ModuleHookType) -> None
        """Register a hook to be called when the module with the given origin is
        imported.
        The hook will be called with the module object as argument.
        """
        cls._check_installed()
        # DEV: Under the hypothesis that this is only ever called by the probe
        # poller thread, there are no further actions to take. Should this ever
        # change, then thread-safety might become a concern.
        path = _resolve(origin)
        if path is None:
            raise ValueError("Cannot resolve module origin %s" % origin)
        log.debug("Registering hook '%r' on path '%s'", hook, path)
        instance = cast(ModuleWatchdog, cls._instance)
        instance._hook_map[path].append(hook)
        try:
            module = instance._origin_map[path]
        except KeyError:
            # The module is not loaded yet. Nothing more we can do.
            # (after_import will fire this hook once the module loads.)
            return
        # The module was already imported so we invoke the hook straight-away
        log.debug("Calling hook '%r' on already imported module '%s'", hook, module.__name__)
        hook(module)
@classmethod
def unregister_origin_hook(cls, origin, hook):
# type: (str, Any) -> None
"""Unregister the hook registered with the given module origin and
argument.
"""
cls._check_installed()
path = _resolve(origin)
if path is None:
raise ValueError("Module origin %s cannot be resolved", origin)
instance = cast(ModuleWatchdog, cls._instance)
if path not in instance._hook_map:
raise ValueError("No hooks registered for origin %s" % origin)
try:
if path in instance._hook_map:
hooks = instance._hook_map[path]
hooks.remove(hook)
if not hooks:
del instance._hook_map[path]
except ValueError:
raise ValueError("Hook %r not registered for origin %s" % (hook, origin))
    @classmethod
    def register_module_hook(cls, module, hook):
        # type: (str, ModuleHookType) -> None
        """Register a hook to be called when the module with the given name is
        imported.
        The hook will be called with the module object as argument.
        """
        cls._check_installed()
        log.debug("Registering hook '%r' on module '%s'", hook, module)
        instance = cast(ModuleWatchdog, cls._instance)
        instance._hook_map[module].append(hook)
        try:
            module_object = instance[module]
        except KeyError:
            # The module is not loaded yet. Nothing more we can do.
            # (after_import will fire this hook once the module loads.)
            return
        # The module was already imported so we invoke the hook straight-away
        log.debug("Calling hook '%r' on already imported module '%s'", hook, module)
        hook(module_object)
@classmethod
def unregister_module_hook(cls, module, hook):
# type: (str, ModuleHookType) -> None
"""Unregister the hook registered with the given module name and
argument.
"""
cls._check_installed()
instance = cast(ModuleWatchdog, cls._instance)
if module not in instance._hook_map:
raise ValueError("No hooks registered for module %s" % module)
try:
if module in instance._hook_map:
hooks = instance._hook_map[module]
hooks.remove(hook)
if not hooks:
del instance._hook_map[module]
except ValueError:
raise ValueError("Hook %r not registered for module %r" % (hook, module))
    @classmethod
    def _check_installed(cls):
        # type: () -> None
        """Raise ``RuntimeError`` unless this watchdog class is installed."""
        if not cls.is_installed():
            raise RuntimeError("%s is not installed" % cls.__name__)
    @classmethod
    def install(cls):
        # type: () -> None
        """Install the module watchdog."""
        if cls.is_installed():
            raise RuntimeError("%s is already installed" % cls.__name__)
        # Replace sys.modules with a watchdog instance that proxies to it, and
        # register the instance as the first meta-path finder.
        cls._instance = sys.modules = cls()
        sys.modules._add_to_meta_path()
        log.debug("%s installed", cls)
    @classmethod
    def is_installed(cls):
        # type: () -> bool
        """Check whether this module watchdog class is installed."""
        return cls._instance is not None and type(cls._instance) is cls
    @classmethod
    def uninstall(cls):
        # type: () -> None
        """Uninstall the module watchdog.
        This will uninstall only the most recently installed instance of this
        class.
        """
        cls._check_installed()
        # Watchdogs may be nested (each wrapping the previous sys.modules);
        # walk the chain and splice out the first instance of this exact class.
        parent, current = None, sys.modules
        while isinstance(current, ModuleWatchdog):
            if type(current) is cls:
                cls._remove_from_meta_path()
                if parent is not None:
                    # Unlink from the middle of the chain.
                    setattr(parent, "_modules", getattr(current, "_modules"))
                else:
                    # Outermost wrapper: restore its wrapped mapping directly.
                    sys.modules = getattr(current, "_modules")
                cls._instance = None
                log.debug("ModuleWatchdog uninstalled")
                return
            parent = current
            current = current._modules
| 1.929688 | 2 |
test/variational/test_batch_decoupled_variational_strategy.py | nzw0301/gpytorch | 0 | 12771230 | <gh_stars>0
#!/usr/bin/env python3
import unittest
import torch
import gpytorch
from gpytorch.test.variational_test_case import VariationalTestCase
def likelihood_cls():
    """Factory for the Gaussian likelihood shared by the test cases below."""
    return gpytorch.likelihoods.GaussianLikelihood()
def strategy_cls(model, inducing_points, variational_distribution, learn_inducing_locations):
    """Build a ``BatchDecoupledVariationalStrategy`` with default mean/var batching."""
    return gpytorch.variational.BatchDecoupledVariationalStrategy(
        model, inducing_points, variational_distribution, learn_inducing_locations
    )
def batch_dim_strategy_cls(model, inducing_points, variational_distribution, learn_inducing_locations):
    """Build the strategy with ``mean_var_batch_dim=-1``.

    NOTE(review): this factory is not referenced anywhere else in this module;
    it looks intended for ``TestBatchDecoupledVariationalGPBatchDim`` — confirm.
    """
    return gpytorch.variational.BatchDecoupledVariationalStrategy(
        model, inducing_points, variational_distribution, learn_inducing_locations, mean_var_batch_dim=-1
    )
class TestBatchDecoupledVariationalGP(VariationalTestCase, unittest.TestCase):
    """Base case: batch-decoupled strategy, Cholesky distribution, standard ELBO."""
    @property
    def batch_shape(self):
        return torch.Size([])
    @property
    def distribution_cls(self):
        return gpytorch.variational.CholeskyVariationalDistribution
    @property
    def likelihood_cls(self):
        return likelihood_cls
    @property
    def mll_cls(self):
        return gpytorch.mlls.VariationalELBO
    @property
    def strategy_cls(self):
        return strategy_cls
    def test_training_iteration(self, *args, **kwargs):
        cg_mock, cholesky_mock = super().test_training_iteration(*args, **kwargs)
        self.assertFalse(cg_mock.called)
        self.assertEqual(cholesky_mock.call_count, 2)  # One for each forward pass, and for computing prior dist
    def test_eval_iteration(self, *args, **kwargs):
        cg_mock, cholesky_mock = super().test_eval_iteration(*args, **kwargs)
        self.assertFalse(cg_mock.called)
        self.assertEqual(cholesky_mock.call_count, 1)  # One to compute cache, that's it!
class TestBatchDecoupledPredictiveGP(TestBatchDecoupledVariationalGP):
    """Same setup, but trained with the predictive log likelihood objective."""
    @property
    def mll_cls(self):
        return gpytorch.mlls.PredictiveLogLikelihood
class TestBatchDecoupledRobustVGP(TestBatchDecoupledVariationalGP):
    """Same setup, but trained with the gamma-robust variational ELBO."""
    @property
    def mll_cls(self):
        return gpytorch.mlls.GammaRobustVariationalELBO
class TestMeanFieldBatchDecoupledVariationalGP(TestBatchDecoupledVariationalGP):
    """ELBO variant with a mean-field (diagonal) variational distribution."""
    @property
    def distribution_cls(self):
        return gpytorch.variational.MeanFieldVariationalDistribution
class TestMeanFieldBatchDecoupledPredictiveGP(TestBatchDecoupledPredictiveGP):
    """Predictive-likelihood variant with a mean-field variational distribution."""
    @property
    def distribution_cls(self):
        return gpytorch.variational.MeanFieldVariationalDistribution
class TestMeanFieldBatchDecoupledRobustVGP(TestBatchDecoupledRobustVGP):
    """Gamma-robust variant with a mean-field variational distribution."""
    @property
    def distribution_cls(self):
        return gpytorch.variational.MeanFieldVariationalDistribution
class TestBatchDecoupledVariationalGPBatchDim(TestBatchDecoupledVariationalGP, unittest.TestCase):
    """Variant whose mean/kernel modules carry an extra trailing batch dim of 2.

    NOTE(review): the module-level ``batch_dim_strategy_cls`` (mean_var_batch_dim=-1)
    looks intended as this class's ``strategy_cls`` override but is not wired up —
    the inherited plain ``strategy_cls`` is used instead; confirm.
    """
    def _make_model_and_likelihood(
        self,
        num_inducing=16,
        batch_shape=torch.Size([]),
        inducing_batch_shape=torch.Size([]),
        strategy_cls=gpytorch.variational.VariationalStrategy,
        distribution_cls=gpytorch.variational.CholeskyVariationalDistribution,
        constant_mean=True,
    ):
        # The extra torch.Size([2]) batch dim gives separate mean/variance GPs.
        class _SVGPRegressionModel(gpytorch.models.ApproximateGP):
            def __init__(self, inducing_points):
                variational_distribution = distribution_cls(num_inducing, batch_shape=batch_shape)
                variational_strategy = strategy_cls(
                    self, inducing_points, variational_distribution, learn_inducing_locations=True
                )
                super().__init__(variational_strategy)
                if constant_mean:
                    self.mean_module = gpytorch.means.ConstantMean(batch_shape=batch_shape + torch.Size([2]))
                    self.mean_module.initialize(constant=1.0)
                else:
                    self.mean_module = gpytorch.means.ZeroMean()
                self.covar_module = gpytorch.kernels.ScaleKernel(
                    gpytorch.kernels.RBFKernel(batch_shape=batch_shape + torch.Size([2])),
                    batch_shape=batch_shape + torch.Size([2]),
                )
            def forward(self, x):
                mean_x = self.mean_module(x)
                covar_x = self.covar_module(x)
                latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
                return latent_pred
        inducing_points = torch.randn(num_inducing, 2).repeat(*inducing_batch_shape, 1, 1)
        return _SVGPRegressionModel(inducing_points), self.likelihood_cls()
    @property
    def distribution_cls(self):
        return gpytorch.variational.CholeskyVariationalDistribution
    @property
    def mll_cls(self):
        return gpytorch.mlls.PredictiveLogLikelihood
class TestMeanFieldBatchDecoupledVariationalGPBatchDim(TestBatchDecoupledVariationalGPBatchDim, unittest.TestCase):
    """Batch-dim variant with a mean-field variational distribution."""
    @property
    def distribution_cls(self):
        return gpytorch.variational.MeanFieldVariationalDistribution
class TestBatchDecoupledVariationalGPOtherBatchDim(TestBatchDecoupledVariationalGP, unittest.TestCase):
    """Variant placing the decoupled mean/var dim at -2 inside a batch_shape of [3]."""
    def _make_model_and_likelihood(
        self,
        num_inducing=16,
        batch_shape=torch.Size([]),
        inducing_batch_shape=torch.Size([]),
        strategy_cls=gpytorch.variational.VariationalStrategy,
        distribution_cls=gpytorch.variational.CholeskyVariationalDistribution,
        constant_mean=True,
    ):
        # The torch.Size([2]) is inserted *before* the last batch dim to match
        # mean_var_batch_dim=-2 in the strategy built below.
        class _SVGPRegressionModel(gpytorch.models.ApproximateGP):
            def __init__(self, inducing_points):
                variational_distribution = distribution_cls(num_inducing, batch_shape=batch_shape)
                variational_strategy = strategy_cls(
                    self, inducing_points, variational_distribution, learn_inducing_locations=True
                )
                super().__init__(variational_strategy)
                if constant_mean:
                    self.mean_module = gpytorch.means.ConstantMean(
                        batch_shape=batch_shape[:-1] + torch.Size([2]) + batch_shape[-1:]
                    )
                    self.mean_module.initialize(constant=1.0)
                else:
                    self.mean_module = gpytorch.means.ZeroMean()
                self.covar_module = gpytorch.kernels.ScaleKernel(
                    gpytorch.kernels.RBFKernel(batch_shape=batch_shape[:-1] + torch.Size([2]) + batch_shape[-1:]),
                    batch_shape=batch_shape[:-1] + torch.Size([2]) + batch_shape[-1:],
                )
            def forward(self, x):
                mean_x = self.mean_module(x)
                covar_x = self.covar_module(x)
                latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
                return latent_pred
        inducing_points = torch.randn(num_inducing, 2).repeat(*inducing_batch_shape, 1, 1)
        return _SVGPRegressionModel(inducing_points), self.likelihood_cls()
    @property
    def strategy_cls(self):
        def _batch_dim_strategy_cls(model, inducing_points, variational_distribution, learn_inducing_locations):
            return gpytorch.variational.BatchDecoupledVariationalStrategy(
                model, inducing_points, variational_distribution, learn_inducing_locations, mean_var_batch_dim=-2
            )
        return _batch_dim_strategy_cls
    @property
    def batch_shape(self):
        return torch.Size([3])
# Allow running this test module directly with ``python <file>``.
if __name__ == "__main__":
    unittest.main()
| 2.265625 | 2 |
gpytorch/variational/ciq_variational_strategy.py | lrast/gpytorch | 2 | 12771231 | #!/usr/bin/env python3
from typing import Optional, Tuple
import torch
from .. import settings
from ..distributions import Delta, MultivariateNormal
from ..lazy import DiagLazyTensor, MatmulLazyTensor, SumLazyTensor, lazify
from ..module import Module
from ..utils import linear_cg
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.memoize import cached
from ._variational_strategy import _VariationalStrategy
from .natural_variational_distribution import NaturalVariationalDistribution
class _NgdInterpTerms(torch.autograd.Function):
    """
    This function takes in
    - the kernel interpolation term K_ZZ^{-1/2} k_ZX
    - the natural parameters of the variational distribution
    and returns
    - the predictive distribution mean/covariance
    - the inducing KL divergence KL( q(u) || p(u))
    However, the gradients will be with respect to the **canonical parameters**
    of the variational distribution, rather than the **natural parameters**.
    This corresponds to performing natural gradient descent on the variational distribution.
    """
    @staticmethod
    def forward(
        ctx, interp_term: torch.Tensor, natural_vec: torch.Tensor, natural_mat: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (interp_mean, interp_var, kl_div) given the interpolation term
        and the natural parameters; kl_div is a zero placeholder filled by the
        gradient trick in ``backward``."""
        # Compute precision
        prec = natural_mat.mul(-2.0)
        diag = prec.diagonal(dim1=-1, dim2=-2).unsqueeze(-1)
        # Make sure that interp_term and natural_vec are the same batch shape
        batch_shape = _mul_broadcast_shape(interp_term.shape[:-2], natural_vec.shape[:-1])
        expanded_interp_term = interp_term.expand(*batch_shape, *interp_term.shape[-2:])
        expanded_natural_vec = natural_vec.expand(*batch_shape, natural_vec.size(-1))
        # Compute necessary solves with the precision. We need
        # m = expec_vec = S * natural_vec
        # S K^{-1/2} k
        solves = linear_cg(
            prec.matmul,
            torch.cat([expanded_natural_vec.unsqueeze(-1), expanded_interp_term], dim=-1),
            n_tridiag=0,
            max_iter=settings.max_cg_iterations.value(),
            tolerance=min(settings.eval_cg_tolerance.value(), settings.cg_tolerance.value()),
            max_tridiag_iter=settings.max_lanczos_quadrature_iterations.value(),
            preconditioner=lambda x: x / diag,
        )
        expec_vec = solves[..., 0]
        s_times_interp_term = solves[..., 1:]
        # Compute the interpolated mean
        # k^T K^{-1/2} m
        interp_mean = (s_times_interp_term.transpose(-1, -2) @ natural_vec.unsqueeze(-1)).squeeze(-1)
        # Compute the interpolated variance
        # k^T K^{-1/2} S K^{-1/2} k = k^T K^{-1/2} (expec_mat - expec_vec expec_vec^T) K^{-1/2} k
        interp_var = (s_times_interp_term * interp_term).sum(dim=-2)
        # Let's not bother actually computing the KL-div in the forward pass
        # 1/2 ( -log | S | + tr(S) + m^T m - len(m) )
        # = 1/2 ( -log | expec_mat - expec_vec expec_vec^T | + tr(expec_mat) - len(m) )
        kl_div = torch.zeros_like(interp_mean[..., 0])
        # We're done!
        ctx.save_for_backward(interp_term, s_times_interp_term, interp_mean, natural_vec, expec_vec, prec)
        return interp_mean, interp_var, kl_div
    @staticmethod
    def backward(
        ctx, interp_mean_grad: torch.Tensor, interp_var_grad: torch.Tensor, kl_div_grad: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Gradients w.r.t. interp_term and the *canonical* (expectation)
        parameters, implementing the natural gradient descent update."""
        # Get the saved terms
        interp_term, s_times_interp_term, interp_mean, natural_vec, expec_vec, prec = ctx.saved_tensors
        # Expand data-depenedent gradients
        interp_mean_grad = interp_mean_grad.unsqueeze(-2)
        interp_var_grad = interp_var_grad.unsqueeze(-2)
        # Compute gradient of interp term (K^{-1/2} k)
        # interp_mean component: m
        # interp_var component: S K^{-1/2} k
        # kl component: 0
        interp_term_grad = (interp_var_grad * s_times_interp_term).mul(2.0) + (
            interp_mean_grad * expec_vec.unsqueeze(-1)
        )
        # Compute gradient of expected vector (m)
        # interp_mean component: K^{-1/2} k
        # interp_var component: (k^T K^{-1/2} m) K^{-1/2} k
        # kl component: S^{-1} m
        expec_vec_grad = sum(
            [
                (interp_var_grad * interp_mean.unsqueeze(-2) * interp_term).sum(dim=-1).mul(-2),
                (interp_mean_grad * interp_term).sum(dim=-1),
                (kl_div_grad.unsqueeze(-1) * natural_vec),
            ]
        )
        # Compute gradient of expected matrix (mm^T + S)
        # interp_mean component: 0
        # interp_var component: K^{-1/2} k k^T K^{-1/2}
        # kl component: 1/2 ( I - S^{-1} )
        eye = torch.eye(expec_vec.size(-1), device=expec_vec.device, dtype=expec_vec.dtype)
        expec_mat_grad = torch.add(
            (interp_var_grad * interp_term) @ interp_term.transpose(-1, -2),
            (kl_div_grad.unsqueeze(-1).unsqueeze(-1) * (eye - prec).mul(0.5)),
        )
        # We're done!
        # (PyTorch accepts a trailing None gradient for the historical kwarg.)
        return interp_term_grad, expec_vec_grad, expec_mat_grad, None  # Extra "None" for the kwarg
class CiqVariationalStrategy(_VariationalStrategy):
    r"""
    Similar to :class:`~gpytorch.variational.VariationalStrategy`,
    except the whitening operation is performed using Contour Integral Quadrature
    rather than Cholesky (see `Pleiss et al. (2020)`_ for more info).
    See the `CIQ-SVGP tutorial`_ for an example.
    Contour Integral Quadrature uses iterative matrix-vector multiplication to approximate
    the :math:`\mathbf K_{\mathbf Z \mathbf Z}^{-1/2}` matrix used for the whitening operation.
    This can be more efficient than the standard variational strategy for large numbers
    of inducing points (e.g. :math:`M > 1000`) or when the inducing points have structure
    (e.g. they lie on an evenly-spaced grid).
    .. note::
        It is recommended that this object is used in conjunction with
        :obj:`~gpytorch.variational.NaturalVariationalDistribution` and
        `natural gradient descent`_.
    :param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
        Typically passed in when the VariationalStrategy is created in the
        __init__ method of the user defined model.
    :param torch.Tensor inducing_points: Tensor containing a set of inducing
        points to use for variational inference.
    :param ~gpytorch.variational.VariationalDistribution variational_distribution: A
        VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
    :param learn_inducing_locations: (Default True): Whether or not
        the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
        parameters of the model).
    :type learn_inducing_locations: `bool`, optional
    .. _Pleiss et al. (2020):
        https://arxiv.org/pdf/2006.11267.pdf
    .. _CIQ-SVGP tutorial:
        examples/04_Variational_and_Approximate_GPs/SVGP_CIQ.html
    .. _natural gradient descent:
        examples/04_Variational_and_Approximate_GPs/Natural_Gradient_Descent.html
    """
    def _ngd(self):
        """Return True if the variational distribution is natural-parameterized
        (i.e. the natural-gradient-descent code paths should be used)."""
        return isinstance(self._variational_distribution, NaturalVariationalDistribution)
    @property
    @cached(name="prior_distribution_memo")
    def prior_distribution(self):
        """Whitened prior p(u) = N(0, I) over the inducing values."""
        zeros = torch.zeros(
            self._variational_distribution.shape(),
            dtype=self._variational_distribution.dtype,
            device=self._variational_distribution.device,
        )
        ones = torch.ones_like(zeros)
        res = MultivariateNormal(zeros, DiagLazyTensor(ones))
        return res
    @property
    @cached(name="variational_distribution_memo")
    def variational_distribution(self):
        """The q(u) object; unavailable under NGD (computed inside forward instead)."""
        if self._ngd():
            raise RuntimeError(
                "Variational distribution for NGD-CIQ should be computed during forward calls. "
                "This is probably a bug in GPyTorch."
            )
        return super().variational_distribution
    def forward(
        self,
        x: torch.Tensor,
        inducing_points: torch.Tensor,
        inducing_values: torch.Tensor,
        variational_inducing_covar: Optional[MultivariateNormal] = None,
        **kwargs,
    ) -> MultivariateNormal:
        """Compute the predictive distribution q(f) at ``x``, whitening via CIQ."""
        # Compute full prior distribution
        full_inputs = torch.cat([inducing_points, x], dim=-2)
        full_output = self.model.forward(full_inputs)
        full_covar = full_output.lazy_covariance_matrix
        # Covariance terms
        num_induc = inducing_points.size(-2)
        test_mean = full_output.mean[..., num_induc:]
        induc_induc_covar = full_covar[..., :num_induc, :num_induc].evaluate_kernel().add_jitter(1e-2)
        induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
        data_data_covar = full_covar[..., num_induc:, num_induc:].add_jitter(1e-4)
        # Compute interpolation terms
        # K_XZ K_ZZ^{-1} \mu_z
        # K_XZ K_ZZ^{-1/2} \mu_Z
        with settings.max_preconditioner_size(0):  # Turn off preconditioning for CIQ
            interp_term = lazify(induc_induc_covar).sqrt_inv_matmul(induc_data_covar)
        # Compute interpolated mean and variance terms
        # We have separate computation rules for NGD versus standard GD
        if self._ngd():
            interp_mean, interp_var, kl_div = _NgdInterpTerms().apply(
                interp_term, self._variational_distribution.natural_vec, self._variational_distribution.natural_mat,
            )
            # Compute the covariance of q(f)
            predictive_var = data_data_covar.diag() - interp_term.pow(2).sum(dim=-2) + interp_var
            predictive_var = torch.clamp_min(predictive_var, settings.min_variance.value(predictive_var.dtype))
            predictive_covar = DiagLazyTensor(predictive_var)
            # Also compute and cache the KL divergence
            if not hasattr(self, "_memoize_cache"):
                self._memoize_cache = dict()
            self._memoize_cache["kl"] = kl_div
        else:
            # Compute interpolated mean term
            interp_mean = torch.matmul(
                interp_term.transpose(-1, -2), (inducing_values - self.prior_distribution.mean).unsqueeze(-1)
            ).squeeze(-1)
            # Compute the covariance of q(f)
            middle_term = self.prior_distribution.lazy_covariance_matrix.mul(-1)
            if variational_inducing_covar is not None:
                middle_term = SumLazyTensor(variational_inducing_covar, middle_term)
            predictive_covar = SumLazyTensor(
                data_data_covar.add_jitter(1e-4),
                MatmulLazyTensor(interp_term.transpose(-1, -2), middle_term @ interp_term),
            )
        # Compute the mean of q(f)
        # k_XZ K_ZZ^{-1/2} (m - K_ZZ^{-1/2} \mu_Z) + \mu_X
        predictive_mean = interp_mean + test_mean
        # Return the distribution
        return MultivariateNormal(predictive_mean, predictive_covar)
    def kl_divergence(self):
        r"""
        Compute the KL divergence between the variational inducing distribution :math:`q(\mathbf u)`
        and the prior inducing distribution :math:`p(\mathbf u)`.
        :rtype: torch.Tensor
        """
        # NOTE: docstring is a raw string so the :math: backslashes are not
        # parsed as (invalid) escape sequences.
        if self._ngd():
            if hasattr(self, "_memoize_cache") and "kl" in self._memoize_cache:
                return self._memoize_cache["kl"]
            else:
                # NOTE(review): the two message fragments below concatenate
                # without a separating space ("...calls.This...").
                raise RuntimeError(
                    "KL divergence for NGD-CIQ should be computed during forward calls."
                    "This is probably a bug in GPyTorch."
                )
        else:
            return super().kl_divergence()
    def __call__(self, x: torch.Tensor, prior: bool = False, **kwargs) -> MultivariateNormal:
        """Dispatch to the prior or to ``forward``, lazily initializing the
        variational parameters on first use."""
        # This is mostly the same as _VariationalStrategy.__call__()
        # but with special rules for natural gradient descent (to prevent O(M^3) computation)
        # If we're in prior mode, then we're done!
        if prior:
            return self.model.forward(x)
        # Delete previously cached items from the training distribution
        if self.training:
            self._clear_cache()
        # (Maybe) initialize variational distribution
        if not self.variational_params_initialized.item():
            if self._ngd():
                noise = torch.randn_like(self.prior_distribution.mean).mul_(1e-3)
                eye = torch.eye(noise.size(-1), dtype=noise.dtype, device=noise.device).mul(-0.5)
                self._variational_distribution.natural_vec.data.copy_(noise)
                self._variational_distribution.natural_mat.data.copy_(eye)
                self.variational_params_initialized.fill_(1)
            else:
                prior_dist = self.prior_distribution
                self._variational_distribution.initialize_variational_distribution(prior_dist)
                self.variational_params_initialized.fill_(1)
        # Ensure inducing_points and x are the same size
        inducing_points = self.inducing_points
        if inducing_points.shape[:-2] != x.shape[:-2]:
            x, inducing_points = self._expand_inputs(x, inducing_points)
        # Get q(f)
        if self._ngd():
            return Module.__call__(
                self, x, inducing_points, inducing_values=None, variational_inducing_covar=None, **kwargs,
            )
        else:
            # Get p(u)/q(u)
            variational_dist_u = self.variational_distribution
            if isinstance(variational_dist_u, MultivariateNormal):
                return Module.__call__(
                    self,
                    x,
                    inducing_points,
                    inducing_values=variational_dist_u.mean,
                    variational_inducing_covar=variational_dist_u.lazy_covariance_matrix,
                    **kwargs,
                )
            elif isinstance(variational_dist_u, Delta):
                return Module.__call__(
                    self,
                    x,
                    inducing_points,
                    inducing_values=variational_dist_u.mean,
                    variational_inducing_covar=None,
                    ngd=False,
                    **kwargs,
                )
            else:
                # NOTE(review): "distribuition" typo in the runtime message below.
                raise RuntimeError(
                    f"Invalid variational distribuition ({type(variational_dist_u)}). "
                    "Expected a multivariate normal or a delta distribution."
                )
| 2.25 | 2 |
core/migrations/0005_sitecontacted.py | andrsko/convabout-server-v1 | 0 | 12771232 | # Generated by Django 3.0.6 on 2020-06-26 09:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the SiteContacted model that stores
    # site contact-form submissions (message plus optional name/email/session).
    # NOTE(review): the FK is named 'user' but references sessions.Session,
    # not an auth user — confirm the naming is intentional.
    dependencies = [
        ('sessions', '0001_initial'),
        ('core', '0004_auto_20200603_1414'),
    ]
    operations = [
        migrations.CreateModel(
            name='SiteContacted',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='timestamp')),
                ('message', models.CharField(max_length=500)),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('email', models.EmailField(blank=True, max_length=100, null=True)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sessions.Session')),
            ],
        ),
    ]
| 1.757813 | 2 |
test/test_teams.py | Galtozzy/basketball_reference_scraper | 191 | 12771233 | <filename>test/test_teams.py
import unittest
from basketball_reference_scraper.teams import get_roster, get_team_stats, get_opp_stats, get_roster_stats, get_team_misc
class TestTeams(unittest.TestCase):
    """Integration tests for the team-level scraper helpers.

    These tests scrape basketball-reference.com, so network access is required.
    """
    def test_get_roster(self):
        df = get_roster('GSW', 2019)
        # NOTE(review): '<NAME>' is an anonymization placeholder left by the
        # data dump — restore the real player name for this filter to match
        # exactly one roster row.
        curry_df = df[df['PLAYER'] == '<NAME>']
        self.assertEqual(len(curry_df), 1)
        expected_columns = ['NUMBER', 'PLAYER', 'POS', 'HEIGHT', 'WEIGHT',
                            'BIRTH_DATE', 'NATIONALITY', 'EXPERIENCE', 'COLLEGE']
        self.assertListEqual(list(df.columns), expected_columns)
    def test_get_roster_on_missing_nationality(self):
        # Early seasons (1956 Fort Wayne) lack nationality data; the scraper
        # should still return the full column set.
        df = get_roster('FTW', 1956)
        expected_columns = ['NUMBER', 'PLAYER', 'POS', 'HEIGHT', 'WEIGHT',
                            'BIRTH_DATE', 'NATIONALITY', 'EXPERIENCE', 'COLLEGE']
        self.assertListEqual(list(df.columns), expected_columns)
    def test_get_team_stats(self):
        # BUG FIX: was named ``get_team_stats`` (missing the ``test_`` prefix),
        # so unittest never discovered or ran it.
        series = get_team_stats('GSW', 2019)
        expected_indices = ['G', 'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', '2P', '2PA', '2P%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
        self.assertCountEqual(list(series.index), expected_indices)
    def test_get_opp_stats(self):
        # BUG FIX: was named ``get_opp_stats`` (missing the ``test_`` prefix),
        # so unittest never discovered or ran it.
        series = get_opp_stats('GSW', 2019)
        expected_indices = ['OPP_G', 'OPP_MP', 'OPP_FG', 'OPP_FGA', 'OPP_FG%', 'OPP_3P', 'OPP_3PA', 'OPP_3P%', 'OPP_2P', 'OPP_2PA', 'OPP_2P%', 'OPP_FT', 'OPP_FTA', 'OPP_FT%', 'OPP_ORB', 'OPP_DRB', 'OPP_TRB', 'OPP_AST', 'OPP_STL', 'OPP_BLK', 'OPP_TOV', 'OPP_PF', 'OPP_PTS']
        self.assertCountEqual(list(series.index), expected_indices)
    def test_get_roster_stats(self):
        df = get_roster_stats('GSW', 2019)
        expected_columns = ['PLAYER', 'POS', 'AGE', 'TEAM', 'G', 'GS', 'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', '2P', '2PA', '2P%', 'eFG%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS', 'SEASON']
        self.assertCountEqual(list(df.columns), expected_columns)
    def test_get_team_misc(self):
        # The duplicated labels (eFG%, TOV%, FT/FGA) appear intentional — the
        # misc table repeats those factors for offense and defense; confirm.
        expected_indices = ['AGE', 'W', 'L', 'PW', 'PL', 'MOV', 'SOS', 'SRS', 'ORtg', 'DRtg', 'NRtg', 'PACE', 'FTr', '3PAr', 'TS%', 'eFG%', 'TOV%', 'ORB%', 'FT/FGA', 'eFG%', 'TOV%', 'DRB%', 'FT/FGA', 'ARENA', 'ATTENDANCE', 'ATTENDANCE/G', 'TEAM', 'SEASON']
        series = get_team_misc('GSW', 2019)
        self.assertCountEqual(list(series.index), expected_indices)
        series = get_team_misc('CHO', 2019)
        self.assertCountEqual(list(series.index), expected_indices)
        series = get_team_misc('NOK', 2007)
        self.assertCountEqual(list(series.index), expected_indices)
        series = get_team_misc('TCB', 1951)
        self.assertCountEqual(list(series.index), expected_indices)
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| 3.046875 | 3 |
dtf/affiliates/views.py | WebPowerLabs/django-trainings | 0 | 12771234 | from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.contrib.auth.decorators import login_required
from utils.views import PermissionMixin
from .models import Partner, PartnerProduct
class PartnerListView(PermissionMixin, ListView):
    """List view of active partners; GET is restricted to logged-in users."""
    model = Partner
    decorators = {'GET': login_required}
    queryset = Partner.objects.filter(active=True)
    template_name = "affiliates/partner_list.html"
class PartnerDetailView(PermissionMixin, DetailView):
    """Detail view of a single partner; GET is restricted to logged-in users.

    NOTE(review): unlike the list view, no ``active=True`` filter is applied
    here — confirm inactive partners should remain reachable by direct URL.
    """
    model = Partner
    decorators = {'GET': login_required}
    template_name = "affiliates/partner_detail.html"
tests/task/nlp/test_masked_language_modeling.py | techthiyanes/lightning-transformers | 0 | 12771235 | <filename>tests/task/nlp/test_masked_language_modeling.py
import sys
from unittest.mock import MagicMock
import pytest
import pytorch_lightning as pl
from transformers import AutoTokenizer
from lightning_transformers.task.nlp.masked_language_modeling import (
MaskedLanguageModelingDataConfig,
MaskedLanguageModelingDataModule,
MaskedLanguageModelingTransformer,
)
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train(hf_cache_path):
    """Fast-dev-run fit of the MLM task on wikitext-2 (downloads data/model)."""
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="prajjwal1/bert-tiny")
    model = MaskedLanguageModelingTransformer(pretrained_model_name_or_path="prajjwal1/bert-tiny")
    dm = MaskedLanguageModelingDataModule(
        cfg=MaskedLanguageModelingDataConfig(
            batch_size=1,
            dataset_name="wikitext",
            dataset_config_name="wikitext-2-raw-v1",
            cache_dir=hf_cache_path,
        ),
        tokenizer=tokenizer,
    )
    trainer = pl.Trainer(fast_dev_run=True)
    trainer.fit(model, dm)
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_predict():
    """hf_predict on a fill-mask prompt should return 5 candidates, best 'the'."""
    model = MaskedLanguageModelingTransformer(
        pretrained_model_name_or_path="prajjwal1/bert-tiny",
        tokenizer=AutoTokenizer.from_pretrained(pretrained_model_name_or_path="prajjwal1/bert-tiny"),
    )
    y = model.hf_predict("The cat sat on [MASK] mat.")
    assert len(y) == 5
    assert y[0]["token_str"] == "the"
def test_model_has_correct_cfg():
    """The MLM transformer should target the AutoModelForMaskedLM head."""
    model = MaskedLanguageModelingTransformer(pretrained_model_name_or_path="prajjwal1/bert-tiny")
    assert model.hparams.downstream_model_type == "transformers.AutoModelForMaskedLM"
def test_datamodule_has_correct_cfg():
    """The datamodule should default to an MLM data config and keep the tokenizer."""
    tokenizer = MagicMock()
    dm = MaskedLanguageModelingDataModule(tokenizer)
    assert isinstance(dm.cfg, MaskedLanguageModelingDataConfig)
    assert dm.tokenizer is tokenizer
| 2.265625 | 2 |
math220/Homework06/ten-digit-problem.py | oniani/udmath | 0 | 12771236 | """
Author: <NAME>
Purpose: Homework (problem 8)
NOTE: I have not included the algorithm to check
that 6210001000 is indeed the only number that
meets the conditions. It needs a bit more explanation
for optimizations so I decided to take it out.
"""
def check(ten_digit_number):
    """Return True if the number is "self-describing".

    Digit at index ``i`` must equal how many times the digit ``i`` occurs in
    the whole number (e.g. 6210001000: six 0s, two 1s, one 2, one 6).
    """
    digits = str(ten_digit_number)
    # all() over enumerate avoids rebuilding list(str(...)) on every loop
    # iteration, which the original implementation did once per digit.
    return all(int(digit) == digits.count(str(index)) for index, digit in enumerate(digits))
def main():
    """
    Verify that 6210001000 meets the conditions.
    """
    # Prints True when 6210001000 is confirmed self-describing.
    print(check(6210001000))  # True! Thus, this number is indeed the answer
# Run the verification when executed as a script.
if __name__ == "__main__":
    main()
| 3.671875 | 4 |
model/m3d_model.py | SB-BISS/RLACOSarsaLambda | 7 | 12771237 | <reponame>SB-BISS/RLACOSarsaLambda
import numpy as np
import math
'''
@author <NAME>
This class's only function is to provide the next_state for the 3D mountain-car problem;
it is a model that the agent can use to query what will happen if an action is performed.
In order to be able to use these models with the HAAApprximatedSarsaLambdaAgent
you only need to implement the next_state method.
For the purposes of this library I am not interested in knowing the next reward,
though eventually this model could be extended a bit.
'''
class m3d_model(object):
    """Transition model for the 3D mountain-car task.

    Exposes :meth:`next_state` so an agent can query the successor of a
    ``(position_x, velocity_x, position_y, velocity_y)`` observation under one
    of five actions, without touching the real environment. Rewards are not
    modelled.
    """
    def __init__(self):
        # Position/velocity bounds and goal position, identical on both axes.
        self.min_position_x = -1.2
        self.max_position_x = 0.6
        self.max_speed_x = 0.07
        self.goal_position_x = 0.5
        self.min_position_y = -1.2
        self.max_position_y = 0.6
        self.max_speed_y = 0.07
        self.goal_position_y = 0.5
    def next_state(self, current_observation, action):
        """Return the successor observation for ``action``.

        Actions: 4 = coast (gravity term on both axes); 3/1 = thrust +x/-x;
        2/0 = thrust +y/-y. A thrusted axis also feels the gravity term on
        that axis; the other axis velocity is left untouched.
        """
        pos_x, vel_x, pos_y, vel_y = current_observation
        gravity_x = math.cos(3 * pos_x) * (-0.0025)
        gravity_y = math.cos(3 * pos_y) * (-0.0025)
        if action == 4:
            vel_x += gravity_x
            vel_y += gravity_y
        elif action == 3:
            vel_x += 0.002 + gravity_x
        elif action == 2:
            vel_y += 0.002 + gravity_y
        elif action == 1:
            vel_x += -0.002 + gravity_x
        elif action == 0:
            vel_y += -0.002 + gravity_y
        # Clamp velocities, integrate positions, then clamp positions.
        vel_x = np.clip(vel_x, -self.max_speed_x, self.max_speed_x)
        vel_y = np.clip(vel_y, -self.max_speed_y, self.max_speed_y)
        pos_x = np.clip(pos_x + vel_x, self.min_position_x, self.max_position_x)
        pos_y = np.clip(pos_y + vel_y, self.min_position_y, self.max_position_y)
        # Hitting the left wall zeroes the inward velocity on that axis.
        if pos_x == self.min_position_x and vel_x < 0:
            vel_x = 0
        if pos_y == self.min_position_y and vel_y < 0:
            vel_y = 0
        return np.array((pos_x, vel_x, pos_y, vel_y))
| 2.9375 | 3 |
s = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""

# Parse the triangle into rows of ints.
t = [[int(x) for x in line.split()] for line in s.split("\n")]

def max_path_sum(triangle):
    """Maximum top-to-bottom path sum via bottom-up dynamic programming.

    Fix: the original exhaustive recursion explores every path (O(2^n));
    folding each row into the best sums of the row below is O(n^2) and
    yields the same answer.
    """
    best = list(triangle[-1])
    for row in reversed(triangle[:-1]):
        best = [v + max(best[i], best[i + 1]) for i, v in enumerate(row)]
    return best[0]

max_sum = max_path_sum(t)
print(max_sum)
datamine/loaders/sofr.py | Saran33/datamine_python | 39 | 12771239 | from . import Loader
import pandas as pd
class SOFROISLoader(Loader):
    """Loader for CME SOFR OIS curve CSV files (SOFR_OIS_*.csv)."""
    # Dataset name and file pattern the base Loader uses to locate files.
    dataset = 'SOFR'
    fileglob = 'SOFR_OIS_*.csv'
    # Expected column set of the CSVs.
    columns = ['Trade Date', 'Exchange Code', 'Currency','Commodity Code',
               'Short Description','Long Description', 'Curve Date', 'Offset',
               'Discount Factor', 'Forward rate', 'Rate']
    # dtype conversions applied by the Loader machinery, keyed by target type.
    dtypes = {'category': ('Exchange Code', 'Currency', 'Commodity Code',
                           'Short Description', 'Long Description','Curve Date','Forward rate'),
              'int64': ('Offset',),
              'float': ('Discount Factor','Rate'),
              'date:%Y%m%d': ('Trade Date',)}
    def _load(self, file):
        """Read one CSV file into a DataFrame (no per-file transformation)."""
        # Assumption: the header from the value column provides
        # the name of the measure for that CSV file.
        df = pd.read_csv(file, low_memory=False)
        return df

# Module-level singleton used by callers of this loader.
sofroisLoader = SOFROISLoader()
| 3.078125 | 3 |
flak/cli.py | avdd/flak | 0 | 12771240 | <filename>flak/cli.py
# -*- coding: utf-8 -*-
import os
import sys
from threading import Lock, Thread
from functools import update_wrapper
import click
from ._compat import iteritems, reraise
class AppNotFound(click.UsageError):
    """Raised when a Flak application could not be located or loaded."""
    pass
def find_best_app(module):
    """Return the Flak application found in ``module``.

    Tries the common attribute names first, then falls back to the unique
    Flak instance in the module namespace; raises AppNotFound otherwise.
    """
    from .app import Flak
    # search for the most common names first.
    for attr_name in 'app', 'application':
        app = getattr(module, attr_name, None)
        if app is not None and isinstance(app, Flak):
            return app
    # otherwise find the only object that is a flak instance.
    matches = [v for k, v in iteritems(module.__dict__)
               if isinstance(v, Flak)]
    if len(matches) == 1:
        return matches[0]
    # zero or multiple candidates: refuse to guess.
    raise AppNotFound('Failed to find application in module "%s". Are '
                      'you sure it contains a Flak application? Maybe '
                      'you wrapped it in a WSGI middleware or you are '
                      'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
    """Turn a filesystem path into an importable dotted module name.

    Also inserts the package root onto ``sys.path`` so the module can be
    imported.  Raises AppNotFound for non-``.py`` files.
    """
    module = []
    # Strip the extension, or use the package directory for __init__.py.
    if filename.endswith('.py'):
        filename = filename[:-3]
    elif os.path.split(filename)[1] == '__init__.py':
        filename = os.path.dirname(filename)
    else:
        raise AppNotFound('The file provided (%s) does exist but is not a '
                          'valid Python file. This means that it cannot '
                          'be used as application. Please change the '
                          'extension to .py' % filename)
    filename = os.path.realpath(filename)
    # Walk upwards while still inside a package, collecting name parts.
    dirpath = filename
    while 1:
        dirpath, extra = os.path.split(dirpath)
        module.append(extra)
        if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
            break
    # Make the package root importable.
    sys.path.insert(0, dirpath)
    return '.'.join(module[::-1])
def locate_app(app_id):
    """Import and return the app for a ``module`` or ``module:attr`` string."""
    __traceback_hide__ = True
    if ':' in app_id:
        module, app_obj = app_id.split(':', 1)
    else:
        module = app_id
        app_obj = None
    __import__(module)
    mod = sys.modules[module]
    if app_obj is None:
        # No explicit attribute given: fall back to heuristics.
        app = find_best_app(mod)
    else:
        app = getattr(mod, app_obj, None)
        if app is None:
            raise RuntimeError('Failed to find application in module "%s"'
                               % module)
    return app
class DispatchingApp(object):
    '''Special application that dispatches to a flak application which
    is imported by name in a background thread.  If an error happens
    it is recorded and shows as part of the WSGI handling which in case
    of the Werkzeug debugger means that it shows up in the browser.
    '''

    def __init__(self, loader, use_eager_loading=False):
        self.loader = loader
        self._app = None
        self._lock = Lock()
        # exc_info captured by the background loader, replayed on request
        self._bg_loading_exc_info = None
        if use_eager_loading:
            self._load_unlocked()
        else:
            self._load_in_background()

    def _load_in_background(self):
        # Load the app on a daemon-style worker so startup is not blocked.
        def _load_app():
            __traceback_hide__ = True
            with self._lock:
                try:
                    self._load_unlocked()
                except Exception:
                    self._bg_loading_exc_info = sys.exc_info()
        t = Thread(target=_load_app, args=())
        t.start()

    def _flush_bg_loading_exception(self):
        # Re-raise (once) any exception captured by the background loader.
        __traceback_hide__ = True
        exc_info = self._bg_loading_exc_info
        if exc_info is not None:
            self._bg_loading_exc_info = None
            reraise(*exc_info)

    def _load_unlocked(self):
        # Caller must hold self._lock (or be the constructor).
        __traceback_hide__ = True
        self._app = rv = self.loader()
        self._bg_loading_exc_info = None
        return rv

    def __call__(self, environ, start_response):
        __traceback_hide__ = True
        if self._app is not None:
            return self._app(environ, start_response)
        self._flush_bg_loading_exception()
        with self._lock:
            if self._app is not None:
                rv = self._app
            else:
                rv = self._load_unlocked()
        return rv(environ, start_response)
class ScriptInfo(object):
    """Helper object that remembers how to locate the Flak application
    and caches the loaded instance for the lifetime of a CLI invocation."""

    def __init__(self, app_import_path=None, debug=None, create_app=None):
        #: The application import path
        self.app_import_path = app_import_path
        #: The debug flag.  If this is not None, the application will
        #: automatically have its debug flag overridden with this value.
        self.debug = debug
        #: Optionally a function that is passed the script info to create
        #: the instance of the application.
        self.create_app = create_app
        #: A dictionary with arbitrary data that can be associated with
        #: this script info.
        self.data = {}
        self._loaded_app = None

    def load_app(self):
        """Load the application (once) and return it; cached thereafter."""
        __traceback_hide__ = True
        if self._loaded_app is not None:
            return self._loaded_app
        if self.create_app is not None:
            rv = self.create_app(self)
        else:
            if self.app_import_path is None:
                raise AppNotFound('Could not locate Flak application. '
                                  'You did not provide FLAK_APP or the '
                                  '--app parameter.')
            rv = locate_app(self.app_import_path)
        if self.debug is not None:
            rv.debug = self.debug
        self._loaded_app = rv
        return rv
# Click decorator that passes the shared ScriptInfo object into commands.
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)

def with_app(f):
    """Wrap a callback so it runs with the loaded app as first argument."""
    @click.pass_context
    def decorator(__cx, *args, **kwargs):
        app = __cx.ensure_object(ScriptInfo).load_app()
        return __cx.invoke(f, app, *args, **kwargs)
    return update_wrapper(decorator, f)
def set_debug_value(cx, param, value):
    # Eager option callback: stash the debug flag on the ScriptInfo.
    cx.ensure_object(ScriptInfo).debug = value

def set_app_value(cx, param, value):
    # Eager option callback: resolve --app, which may be a file path or
    # an import string; file paths are converted to module names.
    if value is not None:
        if os.path.isfile(value):
            value = prepare_exec_for_file(value)
        elif '.' not in sys.path:
            sys.path.insert(0, '.')
    cx.ensure_object(ScriptInfo).app_import_path = value

# Reusable click options attached by FlakGroup below.
debug_option = click.Option(['--debug/--no-debug'],
                            help='Enable or disable debug mode.',
                            default=None, callback=set_debug_value)

app_option = click.Option(['-a', '--app'],
                          help='The application to run',
                          callback=set_app_value, is_eager=True)
class CommandGroup(click.Group):
    """click.Group subclass whose commands are wrapped with ``with_app``
    by default, so callbacks receive the loaded application."""

    def command(self, *args, **kwargs):
        # ``with_app=False`` opts a command out of automatic app loading.
        wrap_for_cx = kwargs.pop('with_app', True)
        def decorator(f):
            if wrap_for_cx:
                f = with_app(f)
            return click.Group.command(self, *args, **kwargs)(f)
        return decorator

    def group(self, *args, **kwargs):
        # Nested groups get the same behavior by default.
        kwargs.setdefault('cls', CommandGroup)
        return click.Group.group(self, *args, **kwargs)
class FlakGroup(CommandGroup):
    """Top-level group: wires in the default --app/--debug options and the
    built-in ``run``/``shell`` commands, and forwards unknown commands to
    the loaded application's own CLI."""

    def __init__(self, add_default_commands=True, add_app_option=None,
                 add_debug_option=True, create_app=None, **extra):
        params = list(extra.pop('params', None) or ())
        # --app only makes sense when no factory callback is supplied.
        if add_app_option is None:
            add_app_option = create_app is None
        if add_app_option:
            params.append(app_option)
        if add_debug_option:
            params.append(debug_option)
        CommandGroup.__init__(self, params=params, **extra)
        self.create_app = create_app
        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)

    def get_command(self, cx, name):
        # We load built-in commands first as these should always be the
        # same no matter what the app does. If the app does want to
        # override this it needs to make a custom instance of this group
        # and not attach the default commands.
        #
        # This also means that the script stays functional in case the
        # application completely fails.
        rv = CommandGroup.get_command(self, cx, name)
        if rv is not None:
            return rv
        info = cx.ensure_object(ScriptInfo)
        try:
            rv = info.load_app().cli.get_command(cx, name)
            if rv is not None:
                return rv
        except AppNotFound:
            pass

    def list_commands(self, cx):
        # The commands available is the list of both the application (if
        # available) plus the builtin commands.
        rv = set(click.Group.list_commands(self, cx))
        info = cx.ensure_object(ScriptInfo)
        try:
            rv.update(info.load_app().cli.list_commands(cx))
        except Exception:
            # Here we intentionally swallow all exceptions as we don't
            # want the help page to break if the app does not exist.
            # If someone attempts to use the command we try to create
            # the app again and this will give us the error.
            pass
        return sorted(rv)

    def main(self, *args, **kwargs):
        # Ensure a ScriptInfo object (honoring the factory callback) and
        # the FLAK_* auto-envvar prefix are in place before dispatching.
        obj = kwargs.get('obj')
        if obj is None:
            obj = ScriptInfo(create_app=self.create_app)
        kwargs['obj'] = obj
        kwargs.setdefault('auto_envvar_prefix', 'FLAK')
        return CommandGroup.main(self, *args, **kwargs)
def script_info_option(*args, **kwargs):
    """click.option variant that eagerly stores the option's value in the
    ScriptInfo data dict under the required ``script_info_key``."""
    try:
        key = kwargs.pop('script_info_key')
    except LookupError:
        raise TypeError('script_info_key not provided.')
    real_callback = kwargs.get('callback')
    def callback(cx, param, value):
        # Chain any user-supplied callback, then record the value.
        if real_callback is not None:
            value = real_callback(cx, value)
        cx.ensure_object(ScriptInfo).data[key] = value
        return value
    kwargs['callback'] = callback
    kwargs.setdefault('is_eager', True)
    return click.option(*args, **kwargs)
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
              help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
              help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
              help='Enable or disable the reloader. By default the reloader '
              'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
              help='Enable or disable the debugger. By default the debugger '
              'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
              help='Enable or disable eager loading. By default eager '
              'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
              help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
                with_threads):
    '''Runs a local development server for the Flak application.
    This local server is recommended for development purposes only but it
    can also be used for simple intranet deployments. By default it will
    not support any sort of concurrency at all to simplify debugging. This
    can be changed with the --with-threads option which will enable basic
    multithreading.
    The reloader and debugger are by default enabled if the debug flag of
    Flak is enabled and disabled otherwise.
    '''
    from werkzeug.serving import run_simple
    # Unspecified flags default to the application's debug setting.
    if reload is None:
        reload = info.debug
    if debugger is None:
        debugger = info.debug
    if eager_loading is None:
        eager_loading = not reload
    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
    # Extra startup messages. This depends a bit on Werkzeug internals to
    # not double execute when the reloader kicks in.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        # If we have an import path we can print it out now which can help
        # people understand what's being served. If we do not have an
        # import path because the app was loaded through a callback then
        # we won't print anything.
        if info.app_import_path is not None:
            print(' * Serving Flak app "%s"' % info.app_import_path)
        if info.debug is not None:
            print(' * Forcing debug %s' % (info.debug and 'on' or 'off'))
    run_simple(host, port, app, use_reloader=reload,
               use_debugger=debugger, threaded=with_threads)
# Interactive shell preloaded with the app's shell context.
# NOTE(review): no docstring added on purpose — click would surface it as
# the command's long help text and change CLI output.
@click.command('shell', short_help='Runs a shell in the app context.')
@with_app
def shell_command(app):
    import code
    banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
        sys.version,
        sys.platform,
        app.import_name,
        app.debug and ' [debug]' or '',
        app.instance_path,
    )
    cx = {}
    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get('PYTHONSTARTUP')
    if startup and os.path.isfile(startup):
        with open(startup, 'r') as f:
            eval(compile(f.read(), startup, 'exec'), cx)
    cx.update(app.make_shell_context())
    code.interact(banner=banner, local=cx)
# Default top-level command group; ``main`` below dispatches through this.
cli = FlakGroup(help='''\
This shell command acts as general utility script for Flak applications.
It loads the application configured (either through the FLAK_APP environment
variable or the --app parameter) and then provides commands either provided
by the application or Flak itself.
The most useful commands are the "run" and "shell" command.
Example usage:
flak --app=hello --debug run
''')
def main(as_module=False):
    """CLI entry point; ``as_module`` is True when run via ``python -m``."""
    this_module = __package__ + '.cli'
    args = sys.argv[1:]
    if as_module:
        if sys.version_info >= (2, 7):
            name = 'python -m ' + this_module.rsplit('.', 1)[0]
        else:
            name = 'python -m ' + this_module
        # This module is always executed as "python -m flak.run" and as such
        # we need to ensure that we restore the actual command line so that
        # the reloader can properly operate.
        sys.argv = ['-m', this_module] + sys.argv[1:]
    else:
        name = None
    cli.main(args=args, prog_name=name)

if __name__ == '__main__':
    main(as_module=True)
| 2.0625 | 2 |
models/_resnet.py | yhlhit/dann | 0 | 12771241 | <filename>models/_resnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.init as init
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """Residual block of two 3x3 convolutions (ResNet-18/34 style)."""
    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """inplanes/planes: in/out channel counts; stride applies to the
        first conv; downsample (optional) projects the shortcut."""
        super(BasicBlock, self).__init__()
        # Bug fix: the original called the nonexistent ``nn.conv2d`` and
        # passed ``stride`` where ``kernel_size`` is expected; build the
        # standard padded 3x3 convolutions instead (second conv stride 1).
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu-conv-bn, plus the (possibly downsampled) input."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out = out + residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""
    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Bug fix: the 3x3 conv lacked stride/padding/bias=False, which
        # would shrink the feature map and ignore the requested stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Bug fix: kernel_size was -1 (invalid); the expansion conv is 1x1.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion,
                               kernel_size=1, bias=False)
        # Bug fix: forward() used self.bn3, which was never defined.
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Bug fix: the projection must transform the *input* so channel
        # counts match; the original downsampled ``out`` and then added a
        # mismatched residual.
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class GradReverse(torch.autograd.Function):
    """
    Gradient reversal layer: identity on the forward pass, multiplies the
    gradient by ``-constant`` on the backward pass.
    """
    @staticmethod
    def forward(ctx, x, constant):
        # Remember the scaling factor for backward().
        ctx.constant = constant
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Negate and scale the incoming gradient; ``constant`` itself gets
        # no gradient (None).
        grad_output = grad_output.neg() * ctx.constant
        return grad_output, None
def grad_reverse(x, constant):
    """Functional wrapper around the GradReverse autograd Function."""
    return GradReverse.apply(x, constant)
class ResNet(nn.Module):
    """ResNet backbone built from ``block`` (BasicBlock or Bottleneck)
    with ``layers[i]`` blocks in stage i; ends in a num_classes linear."""

    def __init__(self, block, layers, num_classes=31):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv followed by 3x3 stride-2 max pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He initialization for convs; identity affine for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Projection shortcut whenever spatial size or channels change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten to (batch, features) before the classifier.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class Extractor(nn.Module):
    """Convolutional feature extractor: 28x28 inputs (broadcast to 3
    channels) mapped to flat 48*4*4 feature vectors."""

    def __init__(self):
        super(Extractor, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 48, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()

    def forward(self, input):
        # Broadcast (e.g. grayscale) input across 3 channels.
        batch = input.data.shape[0]
        h = input.expand(batch, 3, 28, 28)
        h = F.relu(F.max_pool2d(self.conv1(h), 2))
        h = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(h)), 2))
        return h.view(-1, 48 * 4 * 4)
class Class_classifier(nn.Module):
    """Label-prediction head: 48*4*4 features -> 10-way log-probabilities."""

    def __init__(self):
        super(Class_classifier, self).__init__()
        # self.fc1 = nn.Linear(50 * 4 * 4, 100)
        # self.bn1 = nn.BatchNorm1d(100)
        # self.fc2 = nn.Linear(100, 100)
        # self.bn2 = nn.BatchNorm1d(100)
        # self.fc3 = nn.Linear(100, 10)
        self.fc1 = nn.Linear(48 * 4 * 4, 100)
        self.fc2 = nn.Linear(100, 100)
        self.fc3 = nn.Linear(100, 10)

    def forward(self, input):
        # logits = F.relu(self.bn1(self.fc1(input)))
        # logits = self.fc2(F.dropout(logits))
        # logits = F.relu(self.bn2(logits))
        # logits = self.fc3(logits)
        logits = F.relu(self.fc1(input))
        # NOTE(review): F.dropout defaults to training=True, so dropout
        # stays active even in eval mode here — confirm this is intended.
        logits = self.fc2(F.dropout(logits))
        logits = F.relu(logits)
        logits = self.fc3(logits)
        return F.log_softmax(logits, 1)
class Domain_classifier(nn.Module):
    """Domain discriminator: 48*4*4 features -> 2-way (source/target)
    log-probabilities, behind a gradient-reversal layer."""

    def __init__(self):
        super(Domain_classifier, self).__init__()
        self.fc1 = nn.Linear(48 * 4 * 4, 100)
        self.fc2 = nn.Linear(100, 2)

    def forward(self, input, constant):
        """``constant`` scales the reversed gradient flowing back into
        the feature extractor."""
        # Bug fix: GradReverse has no ``grad_reverse`` attribute (that was
        # an AttributeError at runtime); apply the autograd Function
        # directly so gradients are negated and scaled by ``constant``.
        input = GradReverse.apply(input, constant)
        logits = F.relu(self.fc1(input))
        logits = F.log_softmax(self.fc2(logits), 1)
        return logits
class SVHN_Extractor(nn.Module):
    """Deeper convolutional extractor for the SVHN variant: 28x28 inputs
    (broadcast to 3 channels) -> flat 128*3*3 feature vectors."""

    def __init__(self):
        super(SVHN_Extractor, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size= 5)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size= 5)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size= 5, padding= 2)
        self.bn3 = nn.BatchNorm2d(128)
        self.conv3_drop = nn.Dropout2d()
        self.init_params()

    def init_params(self):
        """He initialization for convs; identity affine for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode= 'fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)

    def forward(self, input):
        # Broadcast (e.g. grayscale) input across 3 channels.
        input = input.expand(input.data.shape[0], 3, 28, 28)
        x = F.relu(self.bn1(self.conv1(input)))
        x = F.max_pool2d(x, 3, 2)
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.max_pool2d(x, 3, 2)
        x = F.relu(self.bn3(self.conv3(x)))
        x = self.conv3_drop(x)
        return x.view(-1, 128 * 3 * 3)
class SVHN_Class_classifier(nn.Module):
    """Label-prediction head for the SVHN variant: 128*3*3 features ->
    10-way log-probabilities."""

    def __init__(self):
        super(SVHN_Class_classifier, self).__init__()
        self.fc1 = nn.Linear(128 * 3 * 3, 3072)
        self.bn1 = nn.BatchNorm1d(3072)
        self.fc2 = nn.Linear(3072, 2048)
        self.bn2 = nn.BatchNorm1d(2048)
        self.fc3 = nn.Linear(2048, 10)

    def forward(self, input):
        logits = F.relu(self.bn1(self.fc1(input)))
        # NOTE(review): F.dropout defaults to training=True, so dropout
        # stays active even in eval mode here — confirm this is intended.
        logits = F.dropout(logits)
        logits = F.relu(self.bn2(self.fc2(logits)))
        logits = self.fc3(logits)
        return F.log_softmax(logits, 1)
class SVHN_Domain_classifier(nn.Module):
    """Domain discriminator for the SVHN variant: 128*3*3 features ->
    2-way (source/target) log-probabilities, behind gradient reversal."""

    def __init__(self):
        super(SVHN_Domain_classifier, self).__init__()
        self.fc1 = nn.Linear(128 * 3 * 3, 1024)
        self.bn1 = nn.BatchNorm1d(1024)
        self.fc2 = nn.Linear(1024, 1024)
        self.bn2 = nn.BatchNorm1d(1024)
        self.fc3 = nn.Linear(1024, 2)

    def forward(self, input, constant):
        """``constant`` scales the reversed gradient flowing back into
        the feature extractor."""
        # Bug fix: GradReverse has no ``grad_reverse`` attribute (that was
        # an AttributeError at runtime); apply the autograd Function
        # directly so gradients are negated and scaled by ``constant``.
        input = GradReverse.apply(input, constant)
        logits = F.relu(self.bn1(self.fc1(input)))
        logits = F.dropout(logits)
        logits = F.relu(self.bn2(self.fc2(logits)))
        logits = F.dropout(logits)
        logits = self.fc3(logits)
        return F.log_softmax(logits, 1)
| 2.921875 | 3 |
python/isarnproject/sketches/spark/tdigest.py | SemanticBeeng/isarn-sketches-spark | 26 | 12771242 | <reponame>SemanticBeeng/isarn-sketches-spark
import sys
import random
import itertools as it
from bisect import bisect_left, bisect_right
from pyspark.sql.types import UserDefinedType, StructField, StructType, \
ArrayType, DoubleType, IntegerType
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.context import SparkContext
# Public API: aggregation UDF constructors plus the TDigest mirror type.
__all__ = ['tdigestIntUDF', 'tdigestLongUDF', 'tdigestFloatUDF', 'tdigestDoubleUDF', \
           'tdigestMLVecUDF', 'tdigestMLLibVecUDF', \
           'tdigestIntArrayUDF', 'tdigestLongArrayUDF', 'tdigestFloatArrayUDF', 'tdigestDoubleArrayUDF', \
           'tdigestReduceUDF', 'tdigestArrayReduceUDF', \
           'TDigest']
def tdigestIntUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of integer data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Resolve the Scala UDAF through the py4j gateway and wrap the
    # resulting JVM column as a python Column.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestIntUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestLongUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of long integer data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestLongUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestFloatUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of (single precision) float data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestFloatUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestDoubleUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of double float data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestDoubleUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestMLVecUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of ML Vector data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestMLVecUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestMLLibVecUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of MLLib Vector data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestMLLibVecUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestIntArrayUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of integer-array data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Resolve the Scala UDAF through the py4j gateway and wrap the
    # resulting JVM column as a python Column.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestIntArrayUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestLongArrayUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of long-integer array data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntArrayUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestLongArrayUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestFloatArrayUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of (single-precision) float array data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntArrayUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestFloatArrayUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestDoubleArrayUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of double array data.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntArrayUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestDoubleArrayUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestReduceUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of t-digests.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntArrayUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestReduceUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))

def tdigestArrayReduceUDF(col, compression=0.5, maxDiscrete=0):
    """
    Return a UDF for aggregating a column of t-digest vectors.
    :param col: name of the column to aggregate
    :param compression: T-Digest compression parameter (default 0.5)
    :param maxDiscrete: maximum unique discrete values to store before reverting to
    continuous (default 0)
    """
    # Same JVM-gateway pattern as tdigestIntArrayUDF.
    sc = SparkContext._active_spark_context
    tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestArrayReduceUDF( \
        compression, maxDiscrete).apply
    return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
class TDigestUDT(UserDefinedType):
    """Spark SQL UserDefinedType that (de)serializes :class:`TDigest`
    as a struct of (compression, maxDiscrete, cent, mass)."""

    @classmethod
    def sqlType(cls):
        # On-wire struct layout; must agree with serialize()/deserialize().
        return StructType([
            StructField("compression", DoubleType(), False),
            StructField("maxDiscrete", IntegerType(), False),
            StructField("cent", ArrayType(DoubleType(), False), False),
            StructField("mass", ArrayType(DoubleType(), False), False)])

    @classmethod
    def module(cls):
        return "isarnproject.sketches.udt.tdigest"

    @classmethod
    def scalaUDT(cls):
        # Fully qualified name of the JVM-side UDT counterpart.
        return "org.apache.spark.isarnproject.sketches.udtdev.TDigestUDT"

    def simpleString(self):
        return "tdigest"

    def serialize(self, obj):
        if isinstance(obj, TDigest):
            return (obj.compression, obj.maxDiscrete, obj._cent, obj._mass)
        else:
            raise TypeError("cannot serialize %r of type %r" % (obj, type(obj)))

    def deserialize(self, datum):
        return TDigest(datum[0], datum[1], datum[2], datum[3])
class TDigest(object):
    """
    A T-Digest sketch of a cumulative numeric distribution.

    This is a "read-only" python mirror of org.isarnproject.sketches.java.TDigest which supports
    all cdf and sampling methods, but does not currently support update with new data. It is
    assumed to have been produced with a t-digest aggregating UDF, also exposed in this package.
    """
    # Because this is a value and not a function, TDigestUDT has to be defined above,
    # and in the same file.
    __UDT__ = TDigestUDT()
    def __init__(self, compression, maxDiscrete, cent, mass):
        """compression/maxDiscrete: sketch parameters; cent/mass: parallel
        sequences of cluster centers and their masses."""
        self.compression = float(compression)
        self.maxDiscrete = int(maxDiscrete)
        assert self.compression > 0.0, "compression must be > 0"
        assert self.maxDiscrete >= 0, "maxDiscrete must be >= 0"
        self._cent = [float(v) for v in cent]
        self._mass = [float(v) for v in mass]
        assert len(self._mass) == len(self._cent), "cluster mass and cent must have same dimension"
        self.nclusters = len(self._cent)
        # Current implementation is "read only" so we can just store cumulative sum here.
        # To support updating, 'csum' would need to become a Fenwick tree array
        self._csum = list(it.accumulate(self._mass))
    def __repr__(self):
        # repr round-trips with __init__'s four arguments.
        return "TDigest(%s, %s, %s, %s)" % \
            (repr(self.compression), repr(self.maxDiscrete), repr(self._cent), repr(self._mass))

    def mass(self):
        """
        Total mass accumulated by this TDigest
        """
        # Total mass is the last entry of the cumulative sum.
        if len(self._csum) == 0: return 0.0
        return self._csum[-1]

    def size(self):
        """
        Number of clusters in this TDigest
        """
        return len(self._cent)

    def isEmpty(self):
        """
        Returns True if this TDigest is empty, False otherwise
        """
        return len(self._cent) == 0
    def __reduce__(self):
        # Pickle support: reconstruct from the four defining fields.
        return (self.__class__, (self.compression, self.maxDiscrete, self._cent, self._mass, ))

    def _lmcovj(self, m):
        # Leftmost index whose cumulative mass reaches m (bisect_left).
        assert self.nclusters >= 2
        assert (m >= 0.0) and (m <= self.mass())
        return bisect_left(self._csum, m)

    def _rmcovj(self, m):
        # Rightmost index whose cumulative mass is <= m (bisect_right - 1).
        assert self.nclusters >= 2
        assert (m >= 0.0) and (m <= self.mass())
        return bisect_right(self._csum, m) - 1

    def _rcovj(self, x):
        # Rightmost index with cluster center <= x; -1 if x is below all.
        return bisect_right(self._cent, x) - 1

    # emulates behavior from isarn java TDigest, which computes
    # cumulative sum via a Fenwick tree
    def _ftSum(self, j):
        # Cumulative mass of clusters 0..j, clamped to [0, total mass].
        if (j < 0): return 0.0
        if (j >= self.nclusters): return self.mass()
        return self._csum[j]
def cdf(self, xx):
"""
Return CDF(x) of a numeric value x, with respect to this TDigest CDF sketch.
"""
x = float(xx)
j1 = self._rcovj(x)
if (j1 < 0): return 0.0
if (j1 >= self.nclusters - 1): return 1.0
j2 = j1 + 1
c1 = self._cent[j1]
c2 = self._cent[j2]
tm1 = self._mass[j1]
tm2 = self._mass[j2]
s = self._ftSum(j1 - 1)
d1 = 0.0 if (j1 == 0) else tm1 / 2.0
m1 = s + d1
m2 = m1 + (tm1 - d1) + (tm2 if (j2 == self.nclusters - 1) else tm2 / 2.0)
m = m1 + (x - c1) * (m2 - m1) / (c2 - c1)
return min(m2, max(m1, m)) / self.mass()
def cdfInverse(self, qq):
"""
Given a value q on [0,1], return the value x such that CDF(x) = q.
Returns NaN for any q > 1 or < 0, or if this TDigest is empty.
"""
q = float(qq)
if (q < 0.0) or (q > 1.0): return float('nan')
if (self.nclusters == 0): return float('nan')
if (self.nclusters == 1): return self._cent[0]
if (q == 0.0): return self._cent[0]
if (q == 1.0): return self._cent[self.nclusters - 1]
m = q * self.mass()
j1 = self._rmcovj(m)
j2 = j1 + 1
c1 = self._cent[j1]
c2 = self._cent[j2]
tm1 = self._mass[j1]
tm2 = self._mass[j2]
s = self._ftSum(j1 - 1)
d1 = 0.0 if (j1 == 0) else tm1 / 2.0
m1 = s + d1
m2 = m1 + (tm1 - d1) + (tm2 if (j2 == self.nclusters - 1) else tm2 / 2.0)
x = c1 + (m - m1) * (c2 - c1) / (m2 - m1)
return min(c2, max(c1, x))
def cdfDiscrete(self, xx):
"""
return CDF(x) for a numeric value x, assuming the sketch is representing a
discrete distribution.
"""
x = float(xx)
j = self._rcovj(x)
return self._ftSum(j) / self.mass()
def cdfDiscreteInverse(self, qq):
"""
Given a value q on [0,1], return the value x such that CDF(x) = q, assuming
the sketch is represenging a discrete distribution.
Returns NaN for any q > 1 or < 0, or if this TDigest is empty.
"""
q = float(qq)
if (q < 0.0) or (q > 1.0): return float('nan')
if self.nclusters == 0: return float('nan')
if self.nclusters == 1: return self._cent[0]
m = q * self.mass()
j = self._lmcovj(m)
return self._cent[j]
def samplePDF(self):
"""
Return a random sampling from the sketched distribution, using inverse
transform sampling, assuming a continuous distribution.
"""
return self.cdfInverse(random.random())
def samplePMF(self):
"""
Return a random sampling from the sketched distribution, using inverse
transform sampling, assuming a discrete distribution.
"""
return self.cdfDiscreteInverse(random.random())
def sample(self):
"""
Return a random sampling from the sketched distribution, using inverse
transform sampling, assuming a discrete distribution if the number of
TDigest clusters is <= maxDiscrete, and a continuous distribution otherwise.
"""
if self.maxDiscrete <= self.nclusters:
return self.cdfDiscreteInverse(random.random())
return self.cdfInverse(random.random())
| 2.375 | 2 |
myapps/jupyter/TensorFlow/Distributed/distributed_cnn.py | alonsoir/pipeline | 1 | 12771243 | <gh_stars>1-10
import tensorflow as tf
import numpy as np
# Modules required for file download and extraction
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
from scipy import ndimage
# Local cache directory where the notMNIST archives are downloaded and extracted.
outdir = '/tmp/pipeline/datasets/notmist/'
def maybe_download(filename, url, force=False):
    """Download `url + filename` into outdir if not already cached.

    Returns the local path of the (possibly pre-existing) file.
    """
    dest = outdir + filename
    if force or not os.path.exists(dest):
        # BUG FIX: the original rebound `filename` to urlretrieve's return value
        # (which is already the full local path), so `return outdir + filename`
        # produced a double-prefixed path after a fresh download.
        local_path, _ = urlretrieve(url + filename, dest)
        print('\nDownload complete for {}'.format(local_path))
    else:
        print('File {} already present.'.format(filename))
    print(dest)
    return dest
def maybe_extract(filename, force=False):
    """Extract a .tar.gz archive next to itself (unless already done).

    Returns the sorted list of per-label sub-folders found under the
    extraction root.
    """
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('{} already present - don\'t need to extract {}.'.format(root, filename))
    else:
        print('Extracting data for {}. This may take a while. Please wait.'.format(root))
        print(filename)
        archive = tarfile.open(filename)
        sys.stdout.flush()
        # Extract into the directory containing the archive (up to the last '/').
        archive.extractall(root[0:root.rfind('/') + 1])
        archive.close()
    data_folders = [os.path.join(root, entry)
                    for entry in sorted(os.listdir(root))
                    if os.path.isdir(os.path.join(root, entry))]
    print(data_folders)
    return data_folders
# Locations to download data:
url = 'http://yaroslavvb.com/upload/notMNIST/'
# Download the small notMNIST archive (cached under outdir).
train_zip_path = maybe_download('notMNIST_small.tar.gz', url)
# Extract it; one sub-folder per class label.
train_folders = maybe_extract(train_zip_path)
image_height = 28  # Pixel height of images
image_width = 28  # Pixel width of images
pixel_depth = 255.0  # Number of levels per pixel
expected_img_shape = (image_height, image_width)  # Black and white image, no 3rd dimension
num_labels = len(train_folders)  # one label per extracted sub-folder
def load_image_folder(folder):
    """Load every readable image for one label into a (count, H, W) float32 array.

    Unreadable files and files with unexpected dimensions are skipped; the
    returned array is trimmed to the number of images actually loaded.
    """
    image_files = os.listdir(folder)
    images = np.ndarray(shape=(len(image_files), image_height, image_width),
                        dtype=np.float32)
    loaded = 0  # number of images successfully read so far
    for name in image_files:
        image_file = os.path.join(folder, name)
        try:
            pixels = ndimage.imread(image_file).astype(float)
            # Rescale pixel values: [0.0, 255.0] => [-1.0, 1.0]
            pixels = (pixels - pixel_depth / 2) / (pixel_depth / 2)
            if pixels.shape != expected_img_shape:
                print('File {} has unexpected dimensions: '.format(str(pixels.shape)))
                continue
            images[loaded, :, :] = pixels
            loaded += 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- skipping this file and moving on.')
    # Drop the unused rows reserved for files that failed to load.
    return images[0:loaded, :, :]
def make_data_label_arrays(num_rows, image_height, image_width):
    """
    Creates and returns empty numpy arrays for input data and labels.

    Returns (None, None) when num_rows is zero/falsy.
    """
    if not num_rows:
        return None, None
    data = np.ndarray((num_rows, image_height, image_width), dtype=np.float32)
    labels = np.ndarray(num_rows, dtype=np.int32)
    return data, labels
def collect_datasets(data_folders):
    """Load every label folder.

    Returns ([(images, label, count), ...], total_image_count), where the
    label index is the folder's position in data_folders.
    """
    datasets = []
    total_images = 0
    for label, folder in enumerate(data_folders):
        # Bring all images of this label in as a numpy array.
        images = load_image_folder(folder)
        count = len(images)
        total_images += count
        datasets.append((images, label, count))
    return datasets, total_images
def merge_train_test_datasets(datasets, total_images, percent_test):
    """Merge per-label datasets into shuffled train/test arrays.

    Each label's images are shuffled, then split percent_test/1-percent_test
    between the test and train arrays. The last label absorbs any rounding
    slack so both arrays are filled exactly.

    Returns (train_dataset, train_labels, test_dataset, test_labels).
    """
    # BUG FIX: all sizes/offsets must be ints. The original used float
    # arithmetic (total_images * 0.9, num_images // (1.0 / percent_test)),
    # which produced float array shapes and float slice indices -- both are
    # errors under Python 3 / modern NumPy.
    num_test = int(total_images * percent_test)
    num_train = total_images - num_test
    train_dataset, train_labels = make_data_label_arrays(num_train, image_height, image_width)
    test_dataset, test_labels = make_data_label_arrays(num_test, image_height, image_width)
    train_counter = 0
    test_counter = 0
    for dataset_counter, (dataset, label, num_images) in enumerate(datasets, start=1):
        np.random.shuffle(dataset)
        if dataset_counter != len(datasets):
            n_v = int(num_images * percent_test)
            n_t = num_images - n_v
        else:
            # Last label: size the split so both output arrays end up full.
            n_v = len(test_dataset) - test_counter
            n_t = len(train_dataset) - train_counter
        train_dataset[train_counter: train_counter + n_t] = dataset[:n_t]
        train_labels[train_counter: train_counter + n_t] = label
        test_dataset[test_counter: test_counter + n_v] = dataset[n_t: n_t + n_v]
        test_labels[test_counter: test_counter + n_v] = label
        train_counter += n_t
        test_counter += n_v
    return train_dataset, train_labels, test_dataset, test_labels
# Build the dataset and split it 90% train / 10% test.
train_test_datasets, train_test_total_images = collect_datasets(train_folders)
train_dataset, train_labels, test_dataset, test_labels = \
    merge_train_test_datasets(train_test_datasets, train_test_total_images, 0.1)
# Convert data examples into 3-D tensors
num_channels = 1  # grayscale
def reformat(dataset, labels):
    """Reshape images to 4-D (N, H, W, C) float32 and one-hot encode labels."""
    shaped = dataset.reshape(
        (-1, image_height, image_width, num_channels)).astype(np.float32)
    onehot = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return shaped, onehot
train_dataset, train_labels = reformat(train_dataset, train_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
# Print final array shapes as a sanity check.
print('Training set', train_dataset.shape, train_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def shuffle_data_with_labels(dataset, labels):
    """Return copies of (dataset, labels) shuffled by the same permutation."""
    # BUG FIX: np.random.shuffle cannot shuffle a `range` object under
    # Python 3; draw a random permutation of indices instead.
    indices = np.random.permutation(len(dataset))
    new_data = np.ndarray(dataset.shape, dataset.dtype)
    # BUG FIX: the labels buffer was allocated with dataset.dtype; it must
    # keep the labels' own dtype.
    new_labels = np.ndarray(labels.shape, labels.dtype)
    for n, i in enumerate(indices):
        new_data[n] = dataset[i]
        new_labels[n] = labels[i]
    return new_data, new_labels
train_dataset, train_labels = shuffle_data_with_labels(train_dataset, train_labels)
# Cluster layout: two parameter servers and two workers.
# Kept as a string literal and parsed below with ast.literal_eval.
CLUSTER_SPEC= """
{
'ps' : ['tensorflow0.pipeline.io:8888', 'tensorflow1.pipeline.io:8888'],
'worker' : ['tensorflow2.pipeline.io:8888','tensorflow3.pipeline.io:8888'],
}
"""
import ast
cluster_spec = ast.literal_eval(CLUSTER_SPEC)
spec = tf.train.ClusterSpec(cluster_spec)
# Device strings, one per worker / parameter-server task.
workers = ['/job:worker/task:{}'.format(i) for i in range(len(cluster_spec['worker']))]
param_servers = ['/job:ps/task:{}'.format(i) for i in range(len(cluster_spec['ps']))]
sess_config = tf.ConfigProto(
    allow_soft_placement=True,
    log_device_placement=True)
# Smoke test: place a tf.Print op on every worker and run them once to
# verify all workers are reachable.
graph = tf.Graph()
print_versions = []
with graph.as_default():
    for worker in workers:
        with tf.device(worker):
            version = tf.Print(["active"], ["version"], message="worker is ")
            print_versions.append(version)
target = "grpc://tensorflow0.pipeline.io:8888"
with tf.Session(target, graph=graph, config=sess_config) as session:
    print(session.run(print_versions))
# Model hyperparameters.
patch_size = 5  # convolution kernel size
depth = 16      # convolution output channels
num_hidden = 64  # fully-connected layer width
def variable_summaries(var, name):
    """Attach mean/stddev/max/min scalar summaries and a histogram to `var`."""
    with tf.name_scope("summaries"):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            # BUG FIX: reduce_sum computed the root-sum-of-squares, not the
            # standard deviation; the mean of squared deviations is required.
            # (The 'sttdev/' tag typo is kept so existing TensorBoard runs
            # keep matching tags.)
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
def weight_variable(shape, name):
    """Create a weight Variable initialised from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)
def bias_variable(shape, name):
    """Create a bias Variable initialised to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
def conv2D(data, W, b):
    """Stride-2 SAME-padded convolution followed by a biased ReLU."""
    pre_activation = tf.nn.conv2d(data, W, [1, 2, 2, 1], padding='SAME', name="2DConvolution") + b
    return tf.nn.relu(pre_activation, name="ReLu")
def fc(data, W, b):
    """Flatten `data` and apply a fully-connected layer with ReLU."""
    dims = data.get_shape().as_list()
    flat = tf.reshape(data, [-1, dims[1] * dims[2] * dims[3]])
    return tf.nn.relu(tf.nn.xw_plus_b(flat, W, b), name="ReLu")
def model(data):
    """CNN forward pass: two conv+ReLU+dropout layers, one FC ReLU, linear output.

    Uses the module-level layer*_weights / layer*_biases variables.
    """
    with tf.name_scope("Layer1"):
        conv1 = conv2D(data, layer1_weights, layer1_biases)
        drop1 = tf.nn.dropout(conv1, 0.5, name="Dropout")
    with tf.name_scope("Layer2"):
        conv2 = conv2D(drop1, layer2_weights, layer2_biases)
        drop2 = tf.nn.dropout(conv2, 0.5, name="Dropout")
    with tf.name_scope("Layer3"):
        hidden = fc(drop2, layer3_weights, layer3_biases)
    return tf.matmul(hidden, layer4_weights) + layer4_biases
graph = tf.Graph()
# divide the input across the cluster:
reduce_loss = []
with graph.as_default():
    # Place variables round-robin on the parameter servers.
    device_setter = tf.train.replica_device_setter(cluster=cluster_spec)
    with tf.device(device_setter):
        global_step = tf.Variable(0, name="global_step", trainable=False)
        # Input data.
        input_data = tf.placeholder(
            tf.float32, shape=(None, image_height, image_width, num_channels), name="input_data")
        input_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name="input_labels")
        # Model parameters (shared across all workers via the PS devices).
        layer1_weights = weight_variable([patch_size, patch_size, num_channels, depth], "L1Weights")
        layer1_biases = bias_variable([depth], "L1Bias")
        layer2_weights = weight_variable([patch_size, patch_size, depth, depth], "L2Weights")
        layer2_biases = bias_variable([depth], "L2Bias")
        # Two stride-2 convolutions shrink H and W by a factor of 4 each.
        layer3_weights = weight_variable([image_height // 4 * image_width // 4 * depth, num_hidden], "L3Weights")
        layer3_biases = bias_variable([num_hidden], "L3Bias")
        layer4_weights = weight_variable([num_hidden, num_labels], "L4Weights")
        layer4_biases = bias_variable([num_labels], "L4Bias")
        # Split the batch into one shard per worker.
        splitted = tf.split(0, len(workers), input_data)
        label_splitted = tf.split(0, len(workers), input_labels)
        # Add variable summaries
        for v in [layer1_weights, layer2_weights, layer3_weights, layer4_weights, layer1_biases, layer2_biases, layer3_biases, layer4_biases]:
            variable_summaries(v, v.name)
    # Each worker computes the loss for its shard of the batch.
    for idx, (portion, worker, label_portion) in enumerate(zip(splitted, workers, label_splitted)):
        with tf.device(worker):
            # Training computation.
            local_reduce = tf.Print(portion, ["portion"], message="portion is")
            logits = model(portion)
            loss = tf.nn.softmax_cross_entropy_with_logits(logits, label_portion)
            loss = tf.Print(loss, [tf.reduce_sum(loss), global_step], message="loss, global_step = ")
            reduce_loss.append(loss)
    with tf.device(device_setter):
        # Optimizer.
        mean_loss = tf.reduce_mean(tf.pack(reduce_loss))
        optimizer = tf.train.RMSPropOptimizer(0.01).minimize(mean_loss, global_step=global_step)
        init = tf.initialize_all_variables()
        # Predictions for the training and test data.
        # NOTE(review): `logits` / `label_portion` here are the ones from the
        # LAST loop iteration, so prediction/accuracy only reflect the final
        # worker's shard -- confirm this is intended.
        model_prediction = tf.nn.softmax(logits, name="prediction")
        label_prediction = tf.argmax(model_prediction, 1, name="predicted_label")
        with tf.name_scope('summaries'):
            tf.scalar_summary('loss', mean_loss)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(label_prediction, tf.argmax(label_portion, 1))
            model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.scalar_summary('accuracy', model_accuracy)
        merged_summaries = tf.merge_all_summaries()
# Supervisor handles init, checkpointing and session recovery for the chief.
sv = tf.train.Supervisor(is_chief=True,
                         graph=graph,
                         logdir="/tmp/cnn_distributed",
                         init_op=init,
                         global_step=global_step)
# Directory to export TensorBoard summary statistics, graph data, etc.
TB_DIR = '/tmp/tensorboard/tf_cnn'
num_steps = 2000
batch_size = 256
with sv.prepare_or_wait_for_session(target, config=sess_config) as session:
    writer = tf.train.SummaryWriter(TB_DIR, graph=session.graph)
    for step in range(num_steps):
        # Cycle through the training data in fixed-size minibatches.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {input_data : batch_data, input_labels : batch_labels}
        _, l, g_step = session.run(
            [optimizer, loss, global_step], feed_dict=feed_dict)
        if step % 50 == 0:
            print('Minibatch loss at global_step %s: %s' % (g_step, np.mean(l)))
    # Evaluate once on the held-out test set after training.
    test_dict = {input_data : test_dataset, input_labels : test_labels}
    test_accuracy = session.run(model_accuracy, feed_dict=test_dict)
    print('Test accuracy: {}'.format(test_accuracy))
    writer.close()
| 2.6875 | 3 |
src/design_patterns/solid/lsp/email_broadcaster_bad.py | schuna/design-patterns-python | 0 | 12771244 | from design_patterns.solid.lsp.common import User, SubscribedUser
class EmailBroadcaster:
    """Broadcasts event notifications to users via e-mail (demo implementation)."""

    def __init__(self):
        print("Email Broadcaster")

    def broadcast(self, event, user: User):
        """Format and return the notification text for `user`."""
        message = f"Event: {event} to User: {user.name}"
        return message
class SubscriptionEmailBroadcaster(EmailBroadcaster):
    """E-mail broadcaster that also reports the user's subscription state.

    NOTE: narrowing the `user` parameter to SubscribedUser in this override
    is deliberate -- this module is the "bad" example demonstrating a
    Liskov Substitution Principle violation.
    """

    def __init__(self):
        super().__init__()

    def broadcast(self, event, user: SubscribedUser):
        """Format and return the notification, including subscription state."""
        message = f"Event: {event} to User: {user.name}, subscribed: {user.subscribed}"
        return message
| 2.609375 | 3 |
mathematics/number-theory/salary-blues.py | PingHuskar/hackerrank | 41 | 12771245 | <reponame>PingHuskar/hackerrank
# Mathematics > Number Theory > Salary Blues
# Help manager of HackerX company to normalize salaries.
#
# https://www.hackerrank.com/challenges/salary-blues/problem
# https://www.hackerrank.com/contests/infinitum-apr14/challenges/salary-blues
# challenge id: 1833
#
from math import gcd
def salary_gcds(salaries, increments):
    """For each increment k, the largest d dividing every salary after +k.

    The answer is gcd(A[1]-A[0], ..., A[n-1]-A[0], A[0]+k); with a single
    salary it is simply A[0]+k.
    """
    base = salaries[0]
    # gcd of all pairwise differences; stays 0 when there is one salary.
    diff_gcd = 0
    for s in salaries[1:]:
        diff_gcd = gcd(diff_gcd, abs(s - base))
    answers = []
    for k in increments:
        if diff_gcd == 0:
            # Single-salary case.
            # BUG FIX: the original evaluated `g = A[1]` unconditionally and
            # raised IndexError for n == 1 before its len(A) == 1 guard ran.
            answers.append(base + k)
        else:
            answers.append(gcd(diff_gcd, base + k))
    return answers


def main():
    """Read n, q, the salaries, and q queries from stdin; print each answer."""
    n, q = map(int, input().split())
    salaries = list(map(int, input().split()))
    queries = [int(input()) for _ in range(q)]
    for answer in salary_gcds(salaries, queries):
        print(answer)


if __name__ == "__main__":
    main()
| 3.0625 | 3 |
GWP/QTGB/traj.py | binggu56/qmd | 0 | 12771246 | #!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context("poster")
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
# Left panel: trajectories from q.dat (first column is time, the rest are
# one curve per trajectory).
plt.subplot(121)
traj_data = np.genfromtxt(fname='q.dat')
time_axis = traj_data[:, 0]
for col in range(1, traj_data.shape[1]):
    plt.plot(time_axis, traj_data[:, col])
plt.xlabel('time')

# Right panel: coefficients from c.dat, with y ticks mirrored to the right.
ax2 = plt.subplot(122)
coef_data = np.genfromtxt(fname='c.dat')
for col in range(1, coef_data.shape[1]):
    plt.plot(coef_data[:, 0], coef_data[:, col])
plt.xlabel('time')
ax2.yaxis.tick_right()
ax2.yaxis.set_ticks_position('both')
plt.ylim(-0.2, 5)

plt.legend()
plt.savefig('traj.pdf')
plt.show()
| 2.796875 | 3 |
examples/ble_hid_periph.py | nnja/Adafruit_CircuitPython_BLE | 0 | 12771247 | """
This example acts as a keyboard to peer devices.
"""
# import board
import sys
import time
import adafruit_ble
from adafruit_ble.advertising import Advertisement
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.standard.hid import HIDService
from adafruit_ble.services.standard.device_info import DeviceInfoService
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# Use default HID descriptor
hid = HIDService()
device_info = DeviceInfoService(
    software_revision=adafruit_ble.__version__, manufacturer="Adafruit Industries"
)
advertisement = ProvideServicesAdvertisement(hid)
# Appearance value advertised to peers; presumably the BLE "HID keyboard"
# appearance code -- TODO confirm against the Bluetooth assigned numbers.
advertisement.appearance = 961
scan_response = Advertisement()
ble = adafruit_ble.BLERadio()
# Drop any stale connections before advertising again.
if ble.connected:
    for c in ble.connections:
        c.disconnect()
print("advertising")
ble.start_advertising(advertisement, scan_response)
k = Keyboard(hid.devices)
kl = KeyboardLayoutUS(k)
while True:
    # Busy-wait until a central connects.
    while not ble.connected:
        pass
    print("Start typing:")
    # Forward each character typed on the local console to the peer as
    # HID keystrokes, echoing it locally.
    while ble.connected:
        c = sys.stdin.read(1)
        sys.stdout.write(c)
        kl.write(c)
        time.sleep(0.1)
    # Peer disconnected: advertise again and wait for the next connection.
    ble.start_advertising(advertisement)
| 3.078125 | 3 |
python/average_levels_of_binary_tree.py | anishLearnsToCode/leetcode-algorithms | 17 | 12771248 | <reponame>anishLearnsToCode/leetcode-algorithms<gh_stars>10-100
# Definition for a binary tree node.
from typing import List
from collections import deque
class TreeNode:
    """Plain binary tree node."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def averageOfLevels(self, root: TreeNode) -> List[float]:
        """Return the average node value of each tree level, top to bottom.

        Level-order BFS: process one level at a time by draining the number
        of nodes currently in the queue.
        """
        averages = []
        level = deque([root])
        while level:
            width = len(level)
            total = 0
            for _ in range(width):
                node = level.popleft()
                total += node.val
                if node.left is not None:
                    level.append(node.left)
                if node.right is not None:
                    level.append(node.right)
            averages.append(total / width)
        return averages
| 3.640625 | 4 |
lib/surface/asset/analyze_iam_policy.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12771249 | <gh_stars>1-10
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to analyze IAM policy in the specified root asset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.asset import client_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.asset import flags
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class AnalyzeIamPolicyBeta(base.Command):
  """Analyzes IAM policies that match a request."""

  detailed_help = {
      'DESCRIPTION':
          ' Analyzes IAM policies that match a request.',
      'EXAMPLES':
          """\
      To find out which users have been granted the
      iam.serviceAccounts.actAs permission on a service account, run:
      $ {command} --organization=YOUR_ORG_ID --full-resource-name=YOUR_SERVICE_ACCOUNT_FULL_RESOURCE_NAME --permissions='iam.serviceAccounts.actAs'
      To find out which resources a user can access, run:
      $ {command} --organization=YOUR_ORG_ID --identity='user:<EMAIL>'
      To find out which roles or permissions a user has been granted on a
      project, run:
      $ {command} --organization=YOUR_ORG_ID --full-resource-name=YOUR_PROJECT_FULL_RESOURCE_NAME --identity='user:<EMAIL>'
      To find out which users have been granted the
      iam.serviceAccounts.actAs permission on any applicable resources, run:
      $ {command} --organization=YOUR_ORG_ID --permissions='iam.serviceAccounts.actAs'
      """
  }

  # Asset API version this release track talks to.
  _API_VERSION = client_util.V1P4BETA1_API_VERSION

  @classmethod
  def Args(cls, parser):
    """Register the beta command's flags on `parser`."""
    flags.AddAnalyzerParentArgs(parser)
    flags.AddAnalyzerSelectorsGroup(parser)
    flags.AddAnalyzerOptionsGroup(parser, True)

  def Run(self, args):
    """Execute the analysis request and return the API response."""
    client = client_util.AnalyzeIamPolicyClient(self._API_VERSION)
    return client.Analyze(args)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AnalyzeIamPolicyGA(AnalyzeIamPolicyBeta):
  """Analyzes IAM policies that match a request."""

  # GA track talks to the default Asset API version.
  _API_VERSION = client_util.DEFAULT_API_VERSION

  @classmethod
  def Args(cls, parser):
    """Register GA flags: everything from beta plus the condition context group."""
    super().Args(parser)
    flags.AddAnalyzerConditionContextGroup(parser)
might_useful.py | lyclyc52/nerf_with_slot_attention | 1 | 12771250 | <reponame>lyclyc52/nerf_with_slot_attention<gh_stars>1-10
# For EM
# slots = slots.T
# position = position.T
# with torch.no_grad():
# for i in range(20):
# z = torch.matmul(f, slots) # NxK
# z_p = torch.matmul(f_p, position)
# z = z + w * z_p
# z = F.softmax(z, dim=-1) # NxK
# z_ = z / (1e-6 + z.sum(dim=1, keepdim=True))
# slots = torch.matmul(f.T, z_) # CxK
# slots = _l2norm(slots, dim=0)
# position = torch.matmul(f_p.T, z_)
# position = _l2norm(position, dim=0)
# attn_logits = torch.matmul(f, slots)
# attn = attn_logits.softmax(dim=-1)
# print(attn.shape)
# attn = attn.reshape([B,H,W,num_slots])
# attn = attn.permute([0,3,1,2]) | 1.96875 | 2 |